Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.c | 40
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.h | 3
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.h | 6
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_37xx.c | 11
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_40xx.c | 6
-rw-r--r-- drivers/accel/ivpu/ivpu_ipc.c | 8
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu.c | 8
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.c | 14
-rw-r--r-- drivers/accessibility/speakup/main.c | 2
-rw-r--r-- drivers/acpi/Kconfig | 3
-rw-r--r-- drivers/acpi/Makefile | 8
-rw-r--r-- drivers/acpi/acpica/Makefile | 1
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 2
-rw-r--r-- drivers/acpi/acpica/acobject.h | 107
-rw-r--r-- drivers/acpi/acpica/evgpeinit.c | 1
-rw-r--r-- drivers/acpi/acpica/tbfadt.c | 30
-rw-r--r-- drivers/acpi/acpica/tbutils.c | 7
-rw-r--r-- drivers/acpi/acpica/utdebug.c | 5
-rw-r--r-- drivers/acpi/apei/einj-core.c | 12
-rw-r--r-- drivers/acpi/bus.c | 25
-rw-r--r-- drivers/acpi/cppc_acpi.c | 61
-rw-r--r-- drivers/acpi/dock.c | 48
-rw-r--r-- drivers/acpi/dptf/dptf_pch_fivr.c | 1
-rw-r--r-- drivers/acpi/dptf/dptf_power.c | 2
-rw-r--r-- drivers/acpi/dptf/int340x_thermal.c | 6
-rw-r--r-- drivers/acpi/fan.h | 1
-rw-r--r-- drivers/acpi/internal.h | 3
-rw-r--r-- drivers/acpi/nhlt.c | 289
-rw-r--r-- drivers/acpi/numa/srat.c | 82
-rw-r--r-- drivers/acpi/property.c | 11
-rw-r--r-- drivers/acpi/resource.c | 25
-rw-r--r-- drivers/acpi/scan.c | 33
-rw-r--r-- drivers/acpi/video_detect.c | 8
-rw-r--r-- drivers/acpi/x86/Makefile | 8
-rw-r--r-- drivers/acpi/x86/blacklist.c (renamed from drivers/acpi/blacklist.c) | 2
-rw-r--r-- drivers/acpi/x86/cmos_rtc.c (renamed from drivers/acpi/acpi_cmos_rtc.c) | 2
-rw-r--r-- drivers/acpi/x86/lpss.c (renamed from drivers/acpi/acpi_lpss.c) | 5
-rw-r--r-- drivers/acpi/x86/s2idle.c | 8
-rw-r--r-- drivers/acpi/x86/utils.c | 29
-rw-r--r-- drivers/android/binder.c | 4
-rw-r--r-- drivers/ata/ahci.c | 85
-rw-r--r-- drivers/ata/libata-core.c | 2
-rw-r--r-- drivers/ata/libata-scsi.c | 9
-rw-r--r-- drivers/auxdisplay/Kconfig | 346
-rw-r--r-- drivers/auxdisplay/Makefile | 10
-rw-r--r-- drivers/auxdisplay/charlcd.c | 3
-rw-r--r-- drivers/auxdisplay/seg-led-gpio.c | 6
-rw-r--r-- drivers/base/arch_topology.c | 26
-rw-r--r-- drivers/base/power/main.c | 6
-rw-r--r-- drivers/base/power/wakeup.c | 11
-rw-r--r-- drivers/base/regmap/internal.h | 14
-rw-r--r-- drivers/base/regmap/regcache-maple.c | 2
-rw-r--r-- drivers/base/regmap/regmap-i3c.c | 2
-rw-r--r-- drivers/base/regmap/regmap-kunit.c | 990
-rw-r--r-- drivers/base/regmap/regmap-mdio.c | 2
-rw-r--r-- drivers/base/regmap/regmap-ram.c | 5
-rw-r--r-- drivers/base/regmap/regmap-raw-ram.c | 5
-rw-r--r-- drivers/base/regmap/regmap-sdw-mbq.c | 2
-rw-r--r-- drivers/base/regmap/regmap-sdw.c | 2
-rw-r--r-- drivers/base/regmap/regmap-spi.c | 1
-rw-r--r-- drivers/base/regmap/regmap.c | 37
-rw-r--r-- drivers/block/brd.c | 40
-rw-r--r-- drivers/block/null_blk/main.c | 43
-rw-r--r-- drivers/block/null_blk/null_blk.h | 2
-rw-r--r-- drivers/block/null_blk/zoned.c | 358
-rw-r--r-- drivers/block/ublk_drv.c | 8
-rw-r--r-- drivers/block/virtio_blk.c | 2
-rw-r--r-- drivers/bluetooth/btmtk.c | 7
-rw-r--r-- drivers/bluetooth/btqca.c | 138
-rw-r--r-- drivers/bluetooth/btqca.h | 3
-rw-r--r-- drivers/bluetooth/btusb.c | 11
-rw-r--r-- drivers/bluetooth/hci_qca.c | 29
-rw-r--r-- drivers/bus/Kconfig | 10
-rw-r--r-- drivers/bus/Makefile | 1
-rw-r--r-- drivers/bus/brcmstb_gisb.c | 1
-rw-r--r-- drivers/bus/stm32_etzpc.c | 141
-rw-r--r-- drivers/bus/stm32_firewall.c | 294
-rw-r--r-- drivers/bus/stm32_firewall.h | 83
-rw-r--r-- drivers/bus/stm32_rifsc.c | 252
-rw-r--r-- drivers/bus/ti-sysc.c | 165
-rw-r--r-- drivers/cache/sifive_ccache.c | 74
-rw-r--r-- drivers/char/agp/alpha-agp.c | 2
-rw-r--r-- drivers/char/hw_random/core.c | 2
-rw-r--r-- drivers/char/hw_random/mxc-rnga.c | 9
-rw-r--r-- drivers/char/hw_random/stm32-rng.c | 18
-rw-r--r-- drivers/char/random.c | 10
-rw-r--r-- drivers/char/tpm/Kconfig | 17
-rw-r--r-- drivers/char/tpm/Makefile | 2
-rw-r--r-- drivers/char/tpm/eventlog/acpi.c | 1
-rw-r--r-- drivers/char/tpm/tpm-buf.c | 252
-rw-r--r-- drivers/char/tpm/tpm-chip.c | 6
-rw-r--r-- drivers/char/tpm/tpm-interface.c | 26
-rw-r--r-- drivers/char/tpm/tpm-sysfs.c | 18
-rw-r--r-- drivers/char/tpm/tpm.h | 14
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 53
-rw-r--r-- drivers/char/tpm/tpm2-sessions.c | 1286
-rw-r--r-- drivers/char/tpm/tpm2-space.c | 11
-rw-r--r-- drivers/char/tpm/tpm_infineon.c | 14
-rw-r--r-- drivers/char/tpm/tpm_tis_core.c | 19
-rw-r--r-- drivers/clk/Kconfig | 4
-rw-r--r-- drivers/clk/clk.c | 173
-rw-r--r-- drivers/clk/mediatek/clk-mt7988-infracfg.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mtk.c | 15
-rw-r--r-- drivers/clk/qcom/clk-smd-rpm.c | 1
-rw-r--r-- drivers/clk/qcom/gdsc.c | 11
-rw-r--r-- drivers/clk/samsung/clk-exynos-clkout.c | 13
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-a64.c | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h6.c | 19
-rw-r--r-- drivers/clk/sunxi-ng/ccu_common.c | 19
-rw-r--r-- drivers/clk/sunxi-ng/ccu_common.h | 3
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 2
-rw-r--r-- drivers/clocksource/renesas-ostm.c | 3
-rw-r--r-- drivers/clocksource/timer-ti-dm.c | 1
-rw-r--r-- drivers/comedi/drivers/vmk80xx.c | 35
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 280
-rw-r--r-- drivers/cpufreq/brcmstb-avs-cpufreq.c | 5
-rw-r--r-- drivers/cpufreq/cppc_cpufreq.c | 14
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 10
-rw-r--r-- drivers/cpufreq/cpufreq-dt.c | 21
-rw-r--r-- drivers/cpufreq/cpufreq.c | 47
-rw-r--r-- drivers/cpufreq/freq_table.c | 12
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 173
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq.c | 10
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-hw.c | 4
-rw-r--r-- drivers/cpufreq/sun50i-cpufreq-nvmem.c | 209
-rw-r--r-- drivers/cpufreq/tegra124-cpufreq.c | 19
-rw-r--r-- drivers/cpufreq/ti-cpufreq.c | 4
-rw-r--r-- drivers/cpuidle/coupled.c | 13
-rw-r--r-- drivers/cpuidle/cpuidle-kirkwood.c | 5
-rw-r--r-- drivers/cpuidle/governors/ladder.c | 1
-rw-r--r-- drivers/crypto/Kconfig | 26
-rw-r--r-- drivers/crypto/Makefile | 1
-rw-r--r-- drivers/crypto/atmel-i2c.c | 30
-rw-r--r-- drivers/crypto/atmel-i2c.h | 8
-rw-r--r-- drivers/crypto/atmel-sha204a.c | 68
-rw-r--r-- drivers/crypto/bcm/spu2.c | 2
-rw-r--r-- drivers/crypto/caam/ctrl.c | 19
-rw-r--r-- drivers/crypto/ccp/sp-platform.c | 14
-rw-r--r-- drivers/crypto/hisilicon/debugfs.c | 65
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 23
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 8
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 4
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 32
-rw-r--r-- drivers/crypto/hisilicon/sgl.c | 5
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 24
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto.h | 16
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_main.c | 23
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 183
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_stats.h | 8
-rw-r--r-- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 5
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 7
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/Makefile | 6
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 88
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 10
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c | 101
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h | 86
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c | 97
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h | 76
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c | 231
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h | 188
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 380
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 127
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c | 8
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c | 1010
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h | 10
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c | 318
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h | 89
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c | 8
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h | 11
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_rl.c | 12
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_rl.h | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_sriov.c | 7
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_telemetry.c | 21
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_telemetry.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_transport.c | 4
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_asym_algs.c | 66
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_bl.c | 6
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_bl.h | 11
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_mig_dev.c | 130
-rw-r--r-- drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/cn10k_cpt.c | 4
-rw-r--r-- drivers/crypto/mxs-dcp.c | 104
-rw-r--r-- drivers/crypto/nx/nx-842.c | 6
-rw-r--r-- drivers/crypto/nx/nx-842.h | 10
-rw-r--r-- drivers/crypto/sahara.c | 16
-rw-r--r-- drivers/crypto/starfive/Kconfig | 4
-rw-r--r-- drivers/crypto/starfive/jh7110-aes.c | 597
-rw-r--r-- drivers/crypto/starfive/jh7110-cryp.c | 43
-rw-r--r-- drivers/crypto/starfive/jh7110-cryp.h | 10
-rw-r--r-- drivers/crypto/starfive/jh7110-hash.c | 275
-rw-r--r-- drivers/crypto/starfive/jh7110-rsa.c | 14
-rw-r--r-- drivers/crypto/stm32/stm32-hash.c | 570
-rw-r--r-- drivers/crypto/tegra/Makefile | 9
-rw-r--r-- drivers/crypto/tegra/tegra-se-aes.c | 1933
-rw-r--r-- drivers/crypto/tegra/tegra-se-hash.c | 1060
-rw-r--r-- drivers/crypto/tegra/tegra-se-key.c | 156
-rw-r--r-- drivers/crypto/tegra/tegra-se-main.c | 437
-rw-r--r-- drivers/crypto/tegra/tegra-se.h | 560
-rw-r--r-- drivers/cxl/acpi.c | 13
-rw-r--r-- drivers/cxl/core/cdat.c | 152
-rw-r--r-- drivers/cxl/core/mbox.c | 39
-rw-r--r-- drivers/cxl/core/port.c | 127
-rw-r--r-- drivers/cxl/core/regs.c | 5
-rw-r--r-- drivers/cxl/cxl.h | 8
-rw-r--r-- drivers/cxl/cxlmem.h | 2
-rw-r--r-- drivers/dax/device.c | 2
-rw-r--r-- drivers/devfreq/event/exynos-nocp.c | 6
-rw-r--r-- drivers/devfreq/event/exynos-ppmu.c | 6
-rw-r--r-- drivers/devfreq/exynos-bus.c | 9
-rw-r--r-- drivers/devfreq/mtk-cci-devfreq.c | 6
-rw-r--r-- drivers/devfreq/rk3399_dmc.c | 6
-rw-r--r-- drivers/devfreq/sun8i-a33-mbus.c | 6
-rw-r--r-- drivers/dma/idma64.c | 4
-rw-r--r-- drivers/dma/idxd/cdev.c | 82
-rw-r--r-- drivers/dma/idxd/debugfs.c | 4
-rw-r--r-- drivers/dma/idxd/device.c | 8
-rw-r--r-- drivers/dma/idxd/idxd.h | 5
-rw-r--r-- drivers/dma/idxd/init.c | 6
-rw-r--r-- drivers/dma/idxd/irq.c | 4
-rw-r--r-- drivers/dma/idxd/perfmon.c | 9
-rw-r--r-- drivers/dma/idxd/registers.h | 3
-rw-r--r-- drivers/dma/idxd/sysfs.c | 27
-rw-r--r-- drivers/dma/owl-dma.c | 4
-rw-r--r-- drivers/dma/pl330.c | 3
-rw-r--r-- drivers/dma/tegra186-gpc-dma.c | 3
-rw-r--r-- drivers/dma/xilinx/xdma-regs.h | 3
-rw-r--r-- drivers/dma/xilinx/xdma.c | 42
-rw-r--r-- drivers/dma/xilinx/xilinx_dpdma.c | 13
-rw-r--r-- drivers/dpll/dpll_core.c | 58
-rw-r--r-- drivers/edac/altera_edac.c | 8
-rw-r--r-- drivers/edac/amd64_edac.h | 1
-rw-r--r-- drivers/edac/amd8111_edac.c | 3
-rw-r--r-- drivers/edac/armada_xp_edac.c | 2
-rw-r--r-- drivers/edac/cpc925_edac.c | 2
-rw-r--r-- drivers/edac/edac_device.c | 53
-rw-r--r-- drivers/edac/edac_device.h | 22
-rw-r--r-- drivers/edac/edac_device_sysfs.c | 22
-rw-r--r-- drivers/edac/edac_mc_sysfs.c | 47
-rw-r--r-- drivers/edac/edac_pci.h | 5
-rw-r--r-- drivers/edac/highbank_l2_edac.c | 2
-rw-r--r-- drivers/edac/mpc85xx_edac.c | 2
-rw-r--r-- drivers/edac/octeon_edac-l2c.c | 2
-rw-r--r-- drivers/edac/octeon_edac-pc.c | 2
-rw-r--r-- drivers/edac/qcom_edac.c | 1
-rw-r--r-- drivers/edac/sifive_edac.c | 3
-rw-r--r-- drivers/edac/skx_common.c | 2
-rw-r--r-- drivers/edac/synopsys_edac.c | 50
-rw-r--r-- drivers/edac/thunderx_edac.c | 6
-rw-r--r-- drivers/edac/versal_edac.c | 12
-rw-r--r-- drivers/edac/xgene_edac.c | 10
-rw-r--r-- drivers/edac/zynqmp_edac.c | 2
-rw-r--r-- drivers/eisa/Kconfig | 9
-rw-r--r-- drivers/eisa/virtual_root.c | 2
-rw-r--r-- drivers/firewire/nosy.c | 6
-rw-r--r-- drivers/firewire/ohci.c | 8
-rw-r--r-- drivers/firmware/arm_ffa/driver.c | 189
-rw-r--r-- drivers/firmware/arm_scmi/Makefile | 3
-rw-r--r-- drivers/firmware/arm_scmi/common.h | 11
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 269
-rw-r--r-- drivers/firmware/arm_scmi/mailbox.c | 3
-rw-r--r-- drivers/firmware/arm_scmi/notify.c | 30
-rw-r--r-- drivers/firmware/arm_scmi/perf.c | 15
-rw-r--r-- drivers/firmware/arm_scmi/pinctrl.c | 916
-rw-r--r-- drivers/firmware/arm_scmi/powercap.c | 2
-rw-r--r-- drivers/firmware/arm_scmi/protocols.h | 18
-rw-r--r-- drivers/firmware/arm_scmi/raw_mode.c | 7
-rw-r--r-- drivers/firmware/efi/unaccepted_memory.c | 4
-rw-r--r-- drivers/firmware/google/cbmem.c | 1
-rw-r--r-- drivers/firmware/google/coreboot_table.c | 6
-rw-r--r-- drivers/firmware/google/coreboot_table.h | 6
-rw-r--r-- drivers/firmware/microchip/mpfs-auto-update.c | 8
-rw-r--r-- drivers/firmware/qcom/qcom_qseecom_uefisecapp.c | 137
-rw-r--r-- drivers/firmware/qcom/qcom_scm.c | 118
-rw-r--r-- drivers/firmware/raspberrypi.c | 7
-rw-r--r-- drivers/firmware/smccc/smccc.c | 1
-rw-r--r-- drivers/firmware/ti_sci.c | 24
-rw-r--r-- drivers/fpga/dfl-pci.c | 3
-rw-r--r-- drivers/gpio/gpio-crystalcove.c | 2
-rw-r--r-- drivers/gpio/gpio-lpc32xx.c | 1
-rw-r--r-- drivers/gpio/gpio-tangier.c | 9
-rw-r--r-- drivers/gpio/gpio-tegra186.c | 20
-rw-r--r-- drivers/gpio/gpio-wcove.c | 2
-rw-r--r-- drivers/gpio/gpiolib-cdev.c | 18
-rw-r--r-- drivers/gpio/gpiolib.c | 58
-rw-r--r-- drivers/gpio/gpiolib.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 113
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 72
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc21.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 48
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 50
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 7
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 27
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h | 33
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h | 55
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h | 18
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 12
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 25
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 347
-rw-r--r-- drivers/gpu/drm/ast/ast_dp.c | 3
-rw-r--r-- drivers/gpu/drm/drm_client_modeset.c | 3
-rw-r--r-- drivers/gpu/drm/drm_connector.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem_atomic_helper.c | 4
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 24
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 12
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_hwdb.c | 34
-rw-r--r-- drivers/gpu/drm/gma500/Makefile | 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_device.c | 5
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.h | 9
-rw-r--r-- drivers/gpu/drm/gma500/psb_lid.c | 80
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c | 113
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 19
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 42
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 23
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 4
-rw-r--r-- drivers/gpu/drm/imagination/pvr_fw_mips.h | 5
-rw-r--r-- drivers/gpu/drm/meson/meson_dw_hdmi.c | 70
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h | 34
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 10
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 8
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.c | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_fb.c | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dp.c | 23
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 79
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 7
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9341.c | 13
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt36672e.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-visionox-rm69299.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/pptable.h | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 8
-rw-r--r-- drivers/gpu/drm/ttm/ttm_pool.c | 38
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 2
-rw-r--r-- drivers/gpu/drm/v3d/v3d_irq.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 35
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 32
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 11
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 15
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 44
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h | 3
-rw-r--r-- drivers/gpu/drm/xe/display/intel_fb_bo.c | 8
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display.c | 5
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_engine_regs.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_ccs_mode.c | 19
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_ccs_mode.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_ads.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_ct.c | 14
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_ct.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_ct_types.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_huc.c | 9
-rw-r--r-- drivers/gpu/drm/xe/xe_hwmon.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_lrc.c | 5
-rw-r--r-- drivers/gpu/drm/xe/xe_migrate.c | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_vm.c | 24
-rw-r--r-- drivers/gpu/host1x/bus.c | 8
-rw-r--r-- drivers/gpu/host1x/dev.c | 24
-rw-r--r-- drivers/hid/hid-logitech-dj.c | 4
-rw-r--r-- drivers/hid/hid-mcp2221.c | 2
-rw-r--r-- drivers/hid/hid-nintendo.c | 8
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-core.c | 38
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/ipc.c | 2
-rw-r--r-- drivers/hv/channel.c | 29
-rw-r--r-- drivers/hv/connection.c | 29
-rw-r--r-- drivers/hv/vmbus_drv.c | 94
-rw-r--r-- drivers/hwmon/corsair-cpro.c | 45
-rw-r--r-- drivers/hwmon/pmbus/ucd9000.c | 6
-rw-r--r-- drivers/i2c/i2c-core-base.c | 12
-rw-r--r-- drivers/iio/accel/mxc4005.c | 92
-rw-r--r-- drivers/iio/imu/adis16475.c | 4
-rw-r--r-- drivers/iio/pressure/bmp280-core.c | 1
-rw-r--r-- drivers/iio/pressure/bmp280-spi.c | 13
-rw-r--r-- drivers/iio/pressure/bmp280.h | 1
-rw-r--r-- drivers/infiniband/core/cm.c | 11
-rw-r--r-- drivers/infiniband/hw/mlx5/mad.c | 3
-rw-r--r-- drivers/infiniband/hw/qib/qib_fs.c | 1
-rw-r--r-- drivers/infiniband/sw/rxe/rxe.c | 2
-rw-r--r-- drivers/input/joystick/xpad.c | 2
-rw-r--r-- drivers/input/misc/atlas_btns.c | 1
-rw-r--r-- drivers/input/mouse/amimouse.c | 8
-rw-r--r-- drivers/input/serio/i8042-io.h | 5
-rw-r--r-- drivers/interconnect/core.c | 8
-rw-r--r-- drivers/interconnect/qcom/x1e80100.c | 26
-rw-r--r-- drivers/iommu/amd/init.c | 25
-rw-r--r-- drivers/iommu/amd/iommu.c | 15
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 4
-rw-r--r-- drivers/iommu/intel/iommu.c | 11
-rw-r--r-- drivers/iommu/intel/irq_remapping.c | 113
-rw-r--r-- drivers/iommu/intel/perfmon.c | 2
-rw-r--r-- drivers/iommu/intel/svm.c | 2
-rw-r--r-- drivers/iommu/iommufd/Kconfig | 1
-rw-r--r-- drivers/iommu/irq_remapping.c | 5
-rw-r--r-- drivers/iommu/mtk_iommu.c | 1
-rw-r--r-- drivers/iommu/mtk_iommu_v1.c | 1
-rw-r--r-- drivers/irqchip/Kconfig | 27
-rw-r--r-- drivers/irqchip/Makefile | 3
-rw-r--r-- drivers/irqchip/irq-alpine-msi.c | 2
-rw-r--r-- drivers/irqchip/irq-bcm6345-l1.c | 6
-rw-r--r-- drivers/irqchip/irq-brcmstb-l2.c | 17
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 34
-rw-r--r-- drivers/irqchip/irq-loongson-eiointc.c | 12
-rw-r--r-- drivers/irqchip/irq-loongson-pch-msi.c | 2
-rw-r--r-- drivers/irqchip/irq-loongson-pch-pic.c | 76
-rw-r--r-- drivers/irqchip/irq-mxs.c | 2
-rw-r--r-- drivers/irqchip/irq-renesas-rzg2l.c | 28
-rw-r--r-- drivers/irqchip/irq-riscv-aplic-direct.c | 323
-rw-r--r-- drivers/irqchip/irq-riscv-aplic-main.c | 211
-rw-r--r-- drivers/irqchip/irq-riscv-aplic-main.h | 52
-rw-r--r-- drivers/irqchip/irq-riscv-aplic-msi.c | 257
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-early.c | 201
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-platform.c | 375
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-state.c | 865
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-state.h | 108
-rw-r--r-- drivers/irqchip/irq-sifive-plic.c | 7
-rw-r--r-- drivers/irqchip/irq-stm32-exti.c | 139
-rw-r--r-- drivers/irqchip/irq-sunxi-nmi.c | 1
-rw-r--r-- drivers/irqchip/irq-tb10x.c | 1
-rw-r--r-- drivers/isdn/mISDN/socket.c | 10
-rw-r--r-- drivers/macintosh/via-macii.c | 11
-rw-r--r-- drivers/md/bcache/bset.c | 44
-rw-r--r-- drivers/md/bcache/bset.h | 28
-rw-r--r-- drivers/md/bcache/btree.c | 40
-rw-r--r-- drivers/md/bcache/super.c | 15
-rw-r--r-- drivers/md/bcache/sysfs.c | 2
-rw-r--r-- drivers/md/bcache/writeback.c | 10
-rw-r--r-- drivers/md/dm-bio-prison-v2.c | 3
-rw-r--r-- drivers/md/dm-cache-target.c | 12
-rw-r--r-- drivers/md/dm-clone-target.c | 14
-rw-r--r-- drivers/md/dm-core.h | 2
-rw-r--r-- drivers/md/dm-era-target.c | 3
-rw-r--r-- drivers/md/dm-mpath.c | 3
-rw-r--r-- drivers/md/dm-table.c | 3
-rw-r--r-- drivers/md/dm-thin.c | 12
-rw-r--r-- drivers/md/dm-vdo/data-vio.c | 3
-rw-r--r-- drivers/md/dm-vdo/flush.c | 3
-rw-r--r-- drivers/md/dm-vdo/murmurhash3.c | 2
-rw-r--r-- drivers/md/dm-zone.c | 501
-rw-r--r-- drivers/md/dm.c | 82
-rw-r--r-- drivers/md/dm.h | 2
-rw-r--r-- drivers/md/md-bitmap.c | 6
-rw-r--r-- drivers/md/md.c | 7
-rw-r--r-- drivers/md/md.h | 3
-rw-r--r-- drivers/md/raid1.c | 2
-rw-r--r-- drivers/md/raid5.c | 15
-rw-r--r-- drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c | 8
-rw-r--r-- drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c | 5
-rw-r--r-- drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c | 11
-rw-r--r-- drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c | 4
-rw-r--r-- drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c | 5
-rw-r--r-- drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h | 2
-rw-r--r-- drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c | 2
-rw-r--r-- drivers/memory/brcmstb_memc.c | 1
-rw-r--r-- drivers/memory/mtk-smi.c | 2
-rw-r--r-- drivers/message/fusion/mptsas.c | 14
-rw-r--r-- drivers/misc/cardreader/rtsx_pcr.c | 2
-rw-r--r-- drivers/misc/eeprom/at24.c | 18
-rw-r--r-- drivers/misc/lkdtm/Makefile | 2
-rw-r--r-- drivers/misc/lkdtm/perms.c | 2
-rw-r--r-- drivers/misc/mei/hw-me-regs.h | 2
-rw-r--r-- drivers/misc/mei/pci-me.c | 4
-rw-r--r-- drivers/misc/mei/platform-vsc.c | 17
-rw-r--r-- drivers/misc/mei/pxp/mei_pxp.c | 7
-rw-r--r-- drivers/misc/mei/vsc-tp.c | 84
-rw-r--r-- drivers/misc/mei/vsc-tp.h | 3
-rw-r--r-- drivers/misc/pvpanic/pvpanic-pci.c | 4
-rw-r--r-- drivers/mmc/host/moxart-mmc.c | 1
-rw-r--r-- drivers/mmc/host/omap.c | 48
-rw-r--r-- drivers/mmc/host/sdhci-msm.c | 16
-rw-r--r-- drivers/mmc/host/sdhci-of-dwcmshc.c | 1
-rw-r--r-- drivers/mmc/host/sdhci-pci-core.c | 2
-rw-r--r-- drivers/mtd/mtdcore.c | 2
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 2
-rw-r--r-- drivers/mtd/nand/raw/diskonchip.c | 4
-rw-r--r-- drivers/mtd/nand/raw/qcom_nandc.c | 7
-rw-r--r-- drivers/net/dsa/mt7530.c | 262
-rw-r--r-- drivers/net/dsa/mt7530.h | 10
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 99
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.h | 23
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_com.c | 2
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 35
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_xdp.c | 4
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.c | 13
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.h | 2
-rw-r--r-- drivers/net/ethernet/amd/pds_core/dev.c | 3
-rw-r--r-- drivers/net/ethernet/amd/pds_core/main.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 21
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 84
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 6
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 52
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 20
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/phy.c | 8
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 30
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_debugfs.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 16
-rw-r--r-- drivers/net/ethernet/intel/igc/igc.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_leds.c | 38
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 20
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/qos.c | 1
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/selq.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 49
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 44
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c | 82
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 115
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 5
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.h | 3
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_common.c | 24
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_par.c | 11
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_spi.c | 11
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_port.c | 4
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c | 61
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_filter.c | 14
-rw-r--r-- drivers/net/ethernet/realtek/r8169.h | 6
-rw-r--r-- drivers/net/ethernet/realtek/r8169_leds.c | 35
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 9
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 79
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 18
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 29
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 18
-rw-r--r-- drivers/net/ethernet/ti/am65-cpts.c | 5
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 8
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 2
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 8
-rw-r--r-- drivers/net/fjes/fjes_main.c | 1
-rw-r--r-- drivers/net/geneve.c | 4
-rw-r--r-- drivers/net/gtp.c | 3
-rw-r--r-- drivers/net/hyperv/netvsc.c | 7
-rw-r--r-- drivers/net/macsec.c | 46
-rw-r--r-- drivers/net/phy/dp83869.c | 3
-rw-r--r-- drivers/net/phy/marvell-88q2xxx.c | 119
-rw-r--r-- drivers/net/phy/mediatek-ge-soc.c | 43
-rw-r--r-- drivers/net/tap.c | 2
-rw-r--r-- drivers/net/tun.c | 20
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 15
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 4
-rw-r--r-- drivers/net/virtio_net.c | 26
-rw-r--r-- drivers/net/vxlan/vxlan_core.c | 53
-rw-r--r-- drivers/net/wireless/ath/ath11k/mac.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/bz.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/sc.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/link.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 3
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.c | 2
-rw-r--r-- drivers/net/xen-netback/common.h | 5
-rw-r--r-- drivers/net/xen-netback/interface.c | 2
-rw-r--r-- drivers/net/xen-netback/netback.c | 11
-rw-r--r-- drivers/nfc/trf7970a.c | 42
-rw-r--r-- drivers/nvme/host/core.c | 25
-rw-r--r-- drivers/nvme/host/ioctl.c | 15
-rw-r--r-- drivers/nvme/host/multipath.c | 3
-rw-r--r-- drivers/nvme/host/nvme.h | 26
-rw-r--r-- drivers/nvme/host/pci.c | 22
-rw-r--r-- drivers/nvme/host/tcp.c | 10
-rw-r--r-- drivers/nvme/target/auth.c | 10
-rw-r--r-- drivers/nvme/target/configfs.c | 12
-rw-r--r-- drivers/nvme/target/core.c | 8
-rw-r--r-- drivers/nvme/target/nvmet.h | 1
-rw-r--r-- drivers/nvme/target/rdma.c | 16
-rw-r--r-- drivers/nvme/target/tcp.c | 11
-rw-r--r-- drivers/nvme/target/zns.c | 10
-rw-r--r-- drivers/of/property.c | 2
-rw-r--r-- drivers/opp/of.c | 17
-rw-r--r-- drivers/pci/pcie/aspm.c | 4
-rw-r--r-- drivers/pci/quirks.c | 8
-rw-r--r-- drivers/perf/alibaba_uncore_drw_pmu.c | 11
-rw-r--r-- drivers/perf/amlogic/meson_ddr_pmu_core.c | 1
-rw-r--r-- drivers/perf/arm-cci.c | 1
-rw-r--r-- drivers/perf/arm-ccn.c | 1
-rw-r--r-- drivers/perf/arm-cmn.c | 11
-rw-r--r-- drivers/perf/arm_cspmu/arm_cspmu.c | 9
-rw-r--r-- drivers/perf/arm_dmc620_pmu.c | 1
-rw-r--r-- drivers/perf/arm_dsu_pmu.c | 20
-rw-r--r-- drivers/perf/arm_pmu_platform.c | 1
-rw-r--r-- drivers/perf/arm_smmuv3_pmu.c | 1
-rw-r--r-- drivers/perf/arm_spe_pmu.c | 1
-rw-r--r-- drivers/perf/dwc_pcie_pmu.c | 10
-rw-r--r-- drivers/perf/fsl_imx8_ddr_perf.c | 1
-rw-r--r-- drivers/perf/hisilicon/hisi_pcie_pmu.c | 24
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pmu.c | 7
-rw-r--r-- drivers/perf/hisilicon/hns3_pmu.c | 17
-rw-r--r-- drivers/perf/qcom_l2_pmu.c | 9
-rw-r--r-- drivers/perf/qcom_l3_pmu.c | 1
-rw-r--r-- drivers/perf/riscv_pmu_legacy.c | 1
-rw-r--r-- drivers/perf/riscv_pmu_sbi.c | 2
-rw-r--r-- drivers/perf/thunderx2_pmu.c | 30
-rw-r--r-- drivers/perf/xgene_pmu.c | 1
-rw-r--r-- drivers/phy/freescale/phy-fsl-imx8m-pcie.c | 6
-rw-r--r-- drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 9
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-m31.c | 2
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 12
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h | 1
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h | 1
-rw-r--r-- drivers/phy/rockchip/Kconfig | 1
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-naneng-combphy.c | 36
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-snps-pcie3.c | 31
-rw-r--r-- drivers/phy/ti/phy-tusb1210.c | 23
-rw-r--r-- drivers/pinctrl/Kconfig | 15
-rw-r--r-- drivers/pinctrl/Makefile | 1
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 34
-rw-r--r-- drivers/pinctrl/core.c | 8
-rw-r--r-- drivers/pinctrl/devicetree.c | 10
-rw-r--r-- drivers/pinctrl/intel/pinctrl-baytrail.c | 78
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.h | 4
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-paris.c | 40
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson-a1.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-scmi.c | 571
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rzg2l.c | 14
-rw-r--r-- drivers/platform/chrome/Kconfig | 1
-rw-r--r-- drivers/platform/chrome/cros_ec.c | 16
-rw-r--r-- drivers/platform/chrome/cros_ec_chardev.c | 9
-rw-r--r-- drivers/platform/chrome/cros_ec_debugfs.c | 9
-rw-r--r-- drivers/platform/chrome/cros_ec_lightbar.c | 9
-rw-r--r-- drivers/platform/chrome/cros_ec_lpc.c | 81
-rw-r--r-- drivers/platform/chrome/cros_ec_proto_test.c | 72
-rw-r--r-- drivers/platform/chrome/cros_ec_sensorhub.c | 9
-rw-r--r-- drivers/platform/chrome/cros_ec_sysfs.c | 9
-rw-r--r-- drivers/platform/chrome/cros_ec_uart.c | 28
-rw-r--r-- drivers/platform/chrome/cros_ec_vbc.c | 9
-rw-r--r-- drivers/platform/chrome/cros_hps_i2c.c | 4
-rw-r--r-- drivers/platform/chrome/cros_kbd_led_backlight.c | 11
-rw-r--r-- drivers/platform/chrome/cros_usbpd_logger.c | 9
-rw-r--r-- drivers/platform/chrome/cros_usbpd_notify.c | 9
-rw-r--r-- drivers/platform/chrome/wilco_ec/Kconfig | 1
-rw-r--r-- drivers/platform/chrome/wilco_ec/core.c | 9
-rw-r--r-- drivers/platform/chrome/wilco_ec/debugfs.c | 9
-rw-r--r-- drivers/platform/chrome/wilco_ec/event.c | 2
-rw-r--r-- drivers/platform/chrome/wilco_ec/sysfs.c | 2
-rw-r--r-- drivers/platform/chrome/wilco_ec/telemetry.c | 9
-rw-r--r-- drivers/platform/x86/acer-wmi.c | 9
-rw-r--r-- drivers/platform/x86/amd/pmc/pmc-quirks.c | 9
-rw-r--r-- drivers/platform/x86/amd/pmf/Makefile | 2
-rw-r--r-- drivers/platform/x86/amd/pmf/acpi.c | 7
-rw-r--r-- drivers/platform/x86/amd/pmf/core.c | 1
-rw-r--r-- drivers/platform/x86/amd/pmf/pmf-quirks.c | 51
-rw-r--r-- drivers/platform/x86/amd/pmf/pmf.h | 3
-rw-r--r-- drivers/platform/x86/asus-laptop.c | 1
-rw-r--r-- drivers/platform/x86/classmate-laptop.c | 5
-rw-r--r-- drivers/platform/x86/dell/dell-rbtn.c | 1
-rw-r--r-- drivers/platform/x86/eeepc-laptop.c | 1
-rw-r--r-- drivers/platform/x86/intel/hid.c | 9
-rw-r--r-- drivers/platform/x86/intel/rst.c | 1
-rw-r--r-- drivers/platform/x86/intel/smartconnect.c | 1
-rw-r--r-- drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 2
-rw-r--r-- drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c | 4
-rw-r--r-- drivers/platform/x86/intel/vbtn.c | 11
-rw-r--r-- drivers/platform/x86/lg-laptop.c | 3
-rw-r--r-- drivers/platform/x86/sony-laptop.c | 2
-rw-r--r-- drivers/platform/x86/toshiba_acpi.c | 5
-rw-r--r-- drivers/platform/x86/toshiba_bluetooth.c | 1
-rw-r--r-- drivers/platform/x86/toshiba_haps.c | 1
-rw-r--r-- drivers/platform/x86/wireless-hotkey.c | 1
-rw-r--r-- drivers/pnp/isapnp/Kconfig | 2
-rw-r--r-- drivers/power/supply/mt6360_charger.c | 2
-rw-r--r-- drivers/power/supply/rt9455_charger.c | 2
-rw-r--r-- drivers/powercap/dtpm_cpu.c | 8
-rw-r--r-- drivers/powercap/intel_rapl_common.c | 607
-rw-r--r-- drivers/powercap/intel_rapl_tpmi.c | 3
-rw-r--r-- drivers/ptp/ptp_vmw.c | 1
-rw-r--r-- drivers/pwm/pwm-dwc-core.c | 1
-rw-r--r-- drivers/pwm/pwm-dwc.c | 88
-rw-r--r-- drivers/pwm/pwm-dwc.h | 6
-rw-r--r-- drivers/regulator/core.c | 27
-rw-r--r-- drivers/regulator/irq_helpers.c | 3
-rw-r--r-- drivers/regulator/mt6360-regulator.c | 32
-rw-r--r-- drivers/regulator/qcom-refgen-regulator.c | 1
-rw-r--r-- drivers/regulator/rtq2208-regulator.c | 7
-rw-r--r-- drivers/regulator/vqmmc-ipq4019-regulator.c | 1
-rw-r--r-- drivers/reset/Kconfig | 4
-rw-r--r-- drivers/s390/char/Makefile | 2
-rw-r--r-- drivers/s390/char/raw3270.c | 6
-rw-r--r-- drivers/s390/cio/chp.c | 141
-rw-r--r-- drivers/s390/cio/chp.h | 2
-rw-r--r-- drivers/s390/cio/chsc.c | 122
-rw-r--r-- drivers/s390/cio/chsc.h | 5
-rw-r--r-- drivers/s390/cio/cio_inject.c | 2
-rw-r--r-- drivers/s390/cio/css.c | 14
-rw-r--r-- drivers/s390/cio/css.h | 13
-rw-r--r-- drivers/s390/cio/device.c | 13
-rw-r--r-- drivers/s390/cio/device_fsm.c | 5
-rw-r--r-- drivers/s390/cio/device_ops.c | 8
-rw-r--r-- drivers/s390/cio/qdio_main.c | 28
-rw-r--r-- drivers/s390/cio/trace.h | 2
-rw-r--r-- drivers/s390/crypto/Makefile | 2
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 238
-rw-r--r-- drivers/s390/crypto/ap_bus.h | 22
-rw-r--r-- drivers/s390/crypto/ap_queue.c | 4
-rw-r--r-- drivers/s390/crypto/vfio_ap_ops.c | 224
-rw-r--r-- drivers/s390/crypto/vfio_ap_private.h | 6
-rw-r--r-- drivers/s390/crypto/zcrypt_ccamisc.c | 6
-rw-r--r-- drivers/s390/crypto/zcrypt_ep11misc.c | 48
-rw-r--r-- drivers/s390/net/ism_drv.c | 37
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 61
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 10
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_transport.c | 14
-rw-r--r-- drivers/scsi/qla2xxx/qla_edif.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_mr.c | 6
-rw-r--r-- drivers/scsi/scsi_lib.c | 8
-rw-r--r-- drivers/scsi/sd.c | 12
-rw-r--r-- drivers/scsi/sd.h | 19
-rw-r--r-- drivers/scsi/sd_zbc.c | 335
-rw-r--r-- drivers/scsi/sg.c | 18
-rw-r--r-- drivers/slimbus/qcom-ngd-ctrl.c | 6
-rw-r--r-- drivers/soc/Makefile | 2
-rw-r--r-- drivers/soc/canaan/Kconfig | 4
-rw-r--r-- drivers/soc/hisilicon/Kconfig | 2
-rw-r--r-- drivers/soc/hisilicon/kunpeng_hccs.c | 6
-rw-r--r-- drivers/soc/mediatek/Kconfig | 1
-rw-r--r-- drivers/soc/mediatek/mtk-cmdq-helper.c | 163
-rw-r--r-- drivers/soc/mediatek/mtk-mutex.c | 41
-rw-r--r-- drivers/soc/mediatek/mtk-socinfo.c | 14
-rw-r--r-- drivers/soc/mediatek/mtk-svs.c | 7
-rw-r--r-- drivers/soc/qcom/cmd-db.c | 41
-rw-r--r-- drivers/soc/qcom/icc-bwmon.c | 8
-rw-r--r-- drivers/soc/qcom/pmic_glink.c | 26
-rw-r--r-- drivers/soc/qcom/pmic_pdcharger_ulog.c | 4
-rw-r--r-- drivers/soc/qcom/qcom_stats.c | 4
-rw-r--r-- drivers/soc/qcom/rpm_master_stats.c | 4
-rw-r--r-- drivers/soc/qcom/rpmh-rsc.c | 5
-rw-r--r-- drivers/soc/qcom/socinfo.c | 2
-rw-r--r-- drivers/soc/renesas/Kconfig | 6
-rw-r--r-- drivers/soc/renesas/renesas-soc.c | 20
-rw-r--r-- drivers/soc/samsung/exynos-asv.c | 10
-rw-r--r-- drivers/soc/tegra/pmc.c | 2
-rw-r--r-- drivers/soc/ti/wkup_m3_ipc.c | 7
-rw-r--r-- drivers/soundwire/amd_manager.c | 15
-rw-r--r-- drivers/soundwire/amd_manager.h | 3
-rw-r--r-- drivers/spi/spi-axi-spi-engine.c | 2
-rw-r--r-- drivers/spi/spi-hisi-kunpeng.c | 2
-rw-r--r-- drivers/spi/spi-microchip-core-qspi.c | 1
-rw-r--r-- drivers/spi/spi-stm32.c | 14
-rw-r--r-- drivers/spi/spi.c | 1
-rw-r--r-- drivers/target/target_core_configfs.c | 12
-rw-r--r-- drivers/tee/Kconfig | 1
-rw-r--r-- drivers/tee/Makefile | 1
-rw-r--r-- drivers/tee/amdtee/amdtee_private.h | 2
-rw-r--r-- drivers/tee/amdtee/call.c | 2
-rw-r--r-- drivers/tee/amdtee/core.c | 3
-rw-r--r-- drivers/tee/amdtee/shm_pool.c | 2
-rw-r--r-- drivers/tee/optee/call.c | 2
-rw-r--r-- drivers/tee/optee/core.c | 66
-rw-r--r-- drivers/tee/optee/device.c | 2
-rw-r--r-- drivers/tee/optee/ffa_abi.c | 8
-rw-r--r-- drivers/tee/optee/notif.c | 2
-rw-r--r-- drivers/tee/optee/optee_private.h | 14
-rw-r--r-- drivers/tee/optee/rpc.c | 2
-rw-r--r-- drivers/tee/optee/smc_abi.c | 17
-rw-r--r-- drivers/tee/tee_core.c | 2
-rw-r--r-- drivers/tee/tee_private.h | 35
-rw-r--r-- drivers/tee/tee_shm.c | 67
-rw-r--r-- drivers/tee/tee_shm_pool.c | 2
-rw-r--r-- drivers/tee/tstee/Kconfig | 11
-rw-r--r-- drivers/tee/tstee/Makefile | 3
-rw-r--r-- drivers/tee/tstee/core.c | 480
-rw-r--r-- drivers/tee/tstee/tstee_private.h | 92
-rw-r--r-- drivers/thermal/amlogic_thermal.c | 10
-rw-r--r-- drivers/thermal/armada_thermal.c | 9
-rw-r--r-- drivers/thermal/cpufreq_cooling.c | 3
-rw-r--r-- drivers/thermal/gov_bang_bang.c | 97
-rw-r--r-- drivers/thermal/gov_fair_share.c | 82
-rw-r--r-- drivers/thermal/gov_power_allocator.c | 45
-rw-r--r-- drivers/thermal/gov_step_wise.c | 98
-rw-r--r-- drivers/thermal/gov_user_space.c | 10
-rw-r--r-- drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c | 4
-rw-r--r-- drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 1
-rw-r--r-- drivers/thermal/intel/int340x_thermal/int3403_thermal.c | 1
-rw-r--r-- drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c | 1
-rw-r--r-- drivers/thermal/intel/int340x_thermal/processor_thermal_power_floor.c | 1
-rw-r--r-- drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c | 1
-rw-r--r-- drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c | 1
-rw-r--r-- drivers/thermal/intel/int340x_thermal/processor_thermal_wt_hint.c | 1
-rw-r--r-- drivers/thermal/intel/int340x_thermal/processor_thermal_wt_req.c | 1
-rw-r--r-- drivers/thermal/intel/intel_hfi.c | 113
-rw-r--r-- drivers/thermal/intel/intel_soc_dts_iosf.c | 1
-rw-r--r-- drivers/thermal/k3_bandgap.c | 1
-rw-r--r-- drivers/thermal/loongson2_thermal.c | 117
-rw-r--r-- drivers/thermal/mediatek/lvts_thermal.c | 438
-rw-r--r-- drivers/thermal/qcom/lmh.c | 3
-rw-r--r-- drivers/thermal/qcom/qcom-spmi-temp-alarm.c | 1
-rw-r--r-- drivers/thermal/qcom/tsens-v2.c | 1
-rw-r--r-- drivers/thermal/qcom/tsens.c | 33
-rw-r--r-- drivers/thermal/qcom/tsens.h | 5
-rw-r--r-- drivers/thermal/rcar_gen3_thermal.c | 165
-rw-r--r-- drivers/thermal/thermal_core.c | 180
-rw-r--r-- drivers/thermal/thermal_core.h | 121
-rw-r--r-- drivers/thermal/thermal_debugfs.c | 171
-rw-r--r-- drivers/thermal/thermal_debugfs.h | 8
-rw-r--r-- drivers/thermal/thermal_helpers.c | 8
-rw-r--r-- drivers/thermal/thermal_netlink.c | 68
-rw-r--r-- drivers/thermal/thermal_netlink.h | 26
-rw-r--r-- drivers/thermal/thermal_sysfs.c | 20
-rw-r--r-- drivers/thermal/thermal_trace.h | 2
-rw-r--r-- drivers/thermal/thermal_trace_ipa.h | 2
-rw-r--r-- drivers/thermal/thermal_trip.c | 16
-rw-r--r-- drivers/thunderbolt/switch.c | 48
-rw-r--r-- drivers/thunderbolt/tb.c | 10
-rw-r--r-- drivers/thunderbolt/tb.h | 3
-rw-r--r-- drivers/thunderbolt/usb4.c | 13
-rw-r--r-- drivers/tty/serial/8250/8250.h | 3
-rw-r--r-- drivers/tty/serial/8250/8250_alpha.c | 21
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 4
-rw-r--r-- drivers/tty/serial/8250/8250_dw.c | 6
-rw-r--r-- drivers/tty/serial/8250/8250_lpc18xx.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_pci.c | 6
-rw-r--r-- drivers/tty/serial/8250/Makefile | 2
-rw-r--r-- drivers/tty/serial/mxs-auart.c | 8
-rw-r--r-- drivers/tty/serial/pmac_zilog.c | 14
-rw-r--r-- drivers/tty/serial/serial_base.h | 4
-rw-r--r-- drivers/tty/serial/serial_core.c | 23
-rw-r--r-- drivers/tty/serial/serial_port.c | 34
-rw-r--r-- drivers/tty/serial/stm32-usart.c | 13
-rw-r--r-- drivers/ufs/host/ufs-qcom.c | 8
-rw-r--r-- drivers/uio/uio_hv_generic.c | 12
-rw-r--r-- drivers/usb/class/cdc-wdm.c | 6
-rw-r--r-- drivers/usb/core/hub.c | 5
-rw-r--r-- drivers/usb/core/port.c | 12
-rw-r--r-- drivers/usb/dwc2/hcd_ddma.c | 4
-rw-r--r-- drivers/usb/dwc3/core.c | 90
-rw-r--r-- drivers/usb/dwc3/core.h | 1
-rw-r--r-- drivers/usb/dwc3/ep0.c | 3
-rw-r--r-- drivers/usb/dwc3/gadget.c | 2
-rw-r--r-- drivers/usb/dwc3/host.c | 27
-rw-r--r-- drivers/usb/gadget/composite.c | 6
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 38
-rw-r--r-- drivers/usb/gadget/function/f_ncm.c | 4
-rw-r--r-- drivers/usb/gadget/function/uvc_configfs.c | 4
-rw-r--r-- drivers/usb/gadget/udc/fsl_udc_core.c | 5
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 8
-rw-r--r-- drivers/usb/host/xhci-plat.h | 4
-rw-r--r-- drivers/usb/host/xhci-ring.c | 9
-rw-r--r-- drivers/usb/host/xhci-rzv2m.c | 1
-rw-r--r-- drivers/usb/host/xhci-trace.h | 12
-rw-r--r-- drivers/usb/misc/onboard_usb_hub.c | 6
-rw-r--r-- drivers/usb/serial/option.c | 40
-rw-r--r-- drivers/usb/typec/mux/it5205.c | 2
-rw-r--r-- drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c | 8
-rw-r--r-- drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c | 11
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 46
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.c | 6
-rw-r--r-- drivers/vdpa/vdpa.c | 6
-rw-r--r-- drivers/vfio/pci/vfio_pci.c | 2
-rw-r--r-- drivers/vhost/net.c | 8
-rw-r--r-- drivers/vhost/vhost.c | 26
-rw-r--r-- drivers/video/fbdev/core/fb_defio.c | 2
-rw-r--r-- drivers/virt/acrn/ioreq.c | 2
-rw-r--r-- drivers/virt/coco/sev-guest/sev-guest.c | 28
-rw-r--r-- drivers/virt/vmgenid.c | 3
-rw-r--r-- drivers/virtio/virtio.c | 6
-rw-r--r-- drivers/zorro/zorro.c | 14
983 files changed, 27985 insertions(+), 8355 deletions(-)
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 39f6d1b98fd6..51d3f1a55d02 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/firmware.h>
@@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
return 0;
}
-static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
-{
- int ret;
-
- ret = ivpu_rpm_get_if_active(vdev);
- if (ret < 0)
- return ret;
-
- *clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
-
- if (ret)
- ivpu_rpm_put(vdev);
-
- return 0;
-}
-
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- ret = ivpu_get_core_clock_rate(vdev, &args->value);
+ args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -387,12 +371,15 @@ int ivpu_shutdown(struct ivpu_device *vdev)
{
int ret;
- ivpu_prepare_for_reset(vdev);
+ /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+ pci_save_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_down(vdev);
if (ret)
ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
return ret;
}
@@ -530,7 +517,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
- xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+ xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
@@ -560,11 +547,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
/* Power up early so the rest of init code can access VPU registers */
ret = ivpu_hw_power_up(vdev);
if (ret)
- goto err_power_down;
+ goto err_shutdown;
ret = ivpu_mmu_global_context_init(vdev);
if (ret)
- goto err_power_down;
+ goto err_shutdown;
ret = ivpu_mmu_init(vdev);
if (ret)
@@ -601,10 +588,8 @@ err_mmu_rctx_fini:
ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
ivpu_mmu_global_context_fini(vdev);
-err_power_down:
- ivpu_hw_power_down(vdev);
- if (IVPU_WA(d3hot_after_power_off))
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+err_shutdown:
+ ivpu_shutdown(vdev);
err_xa_destroy:
xa_destroy(&vdev->db_xa);
xa_destroy(&vdev->submitted_jobs_xa);
@@ -628,9 +613,8 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_pm_disable(vdev);
+ ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
- if (IVPU_WA(d3hot_after_power_off))
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
ivpu_jobs_abort_all(vdev);
ivpu_job_done_consumer_fini(vdev);
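A side note on the xa_init_flags() change in the hunks above: XA_FLAGS_LOCK_IRQ makes the xarray take its internal spinlock with interrupts disabled, which is needed once entries can also be touched from IRQ context, while XA_FLAGS_ALLOC keeps ID allocation enabled. A minimal usage sketch, with hypothetical names and limits standing in for the SSID fields shown in the hunk:

#include <linux/xarray.h>

/* Hypothetical driver-private context table; flags mirror the hunk above. */
static DEFINE_XARRAY_FLAGS(ctx_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static int ctx_add(void *ctx, u32 *out_id)
{
	/* XA_LIMIT bounds are stand-ins for the context_xa_limit fields. */
	return xa_alloc(&ctx_xa, out_id, ctx, XA_LIMIT(1, 63), GFP_KERNEL);
}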
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 7be0500d9bb8..bb4374d0eaec 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@@ -90,7 +90,6 @@
struct ivpu_wa_table {
bool punit_disabled;
bool clear_runtime_mem;
- bool d3hot_after_power_off;
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
bool disable_d0i3_msg;
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index b2909168a0a6..094c659d2800 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -21,6 +21,7 @@ struct ivpu_hw_ops {
u32 (*profiling_freq_get)(struct ivpu_device *vdev);
void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+ u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
return vdev->hw->ops->reg_pll_freq_get(vdev);
};
+static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return vdev->hw->ops->ratio_to_freq(vdev, ratio);
+}
+
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_telemetry_offset_get(vdev);
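For reference, the wrapper added above is the driver's usual ops-table indirection: each hardware generation fills a function-pointer table and generic code dispatches through it. A self-contained userspace sketch of the pattern, with the structs stripped to just the members this diff shows and a made-up frequency step:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct ivpu_device;

struct ivpu_hw_ops {
	u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
};

struct ivpu_hw_info {
	const struct ivpu_hw_ops *ops;
};

struct ivpu_device {
	struct ivpu_hw_info *hw;
};

/* Stand-in for a per-generation implementation; the multiplier is invented. */
static u32 ratio_to_freq_sketch(struct ivpu_device *vdev, u32 ratio)
{
	(void)vdev;
	return ratio * 38400u;
}

static const struct ivpu_hw_ops hw_ops_sketch = {
	.ratio_to_freq = ratio_to_freq_sketch,
};

/* Same shape as the inline wrapper added to ivpu_hw.h above. */
static u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
	return vdev->hw->ops->ratio_to_freq(vdev, ratio);
}

int main(void)
{
	struct ivpu_hw_info hw = { .ops = &hw_ops_sketch };
	struct ivpu_device vdev = { .hw = &hw };

	printf("%u\n", ivpu_hw_ratio_to_freq(&vdev, 30));
	return 0;
}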
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 9a0c9498baba..bd25e2d9fb0f 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include "ivpu_drv.h"
@@ -75,7 +75,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
{
vdev->wa.punit_disabled = false;
vdev->wa.clear_runtime_mem = false;
- vdev->wa.d3hot_after_power_off = true;
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
@@ -86,7 +85,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
IVPU_PRINT_WA(punit_disabled);
IVPU_PRINT_WA(clear_runtime_mem);
- IVPU_PRINT_WA(d3hot_after_power_off);
IVPU_PRINT_WA(interrupt_clear_with_0);
}
@@ -805,12 +803,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
/* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
}
-static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
- if ((config & 0xff) == PLL_RATIO_4_3)
+ if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
@@ -829,7 +827,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
- return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+ return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
}
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@@ -1052,6 +1050,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
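To make the arithmetic in ivpu_hw_37xx_ratio_to_freq() concrete, here is a standalone version of its two scaling cases. PLL_REF_CLK_FREQ and PLL_RATIO_4_3 are given hypothetical values; the real constants live elsewhere in the driver and are not part of this diff:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define PLL_REF_CLK_FREQ	50000000u	/* assumed reference clock, Hz */
#define PLL_RATIO_4_3		0x10u		/* assumed ratio code */

static u32 ratio_to_freq_37xx(u32 ratio, u32 config)
{
	u32 pll_clock = PLL_REF_CLK_FREQ * ratio;

	/* Same selection as the patch: the low config byte picks the divider. */
	if ((config & 0xff) == PLL_RATIO_4_3)
		return pll_clock * 2 / 4;
	return pll_clock * 2 / 5;
}

int main(void)
{
	/* With the assumed constants: 50 MHz * 16 * 2/4 = 400 MHz. */
	printf("%u\n", ratio_to_freq_37xx(16, PLL_RATIO_4_3));
	return 0;
}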
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index e4eddbf5d11c..b0b88d4c8926 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
+static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return PLL_RATIO_TO_FREQ(ratio);
+}
+
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
.profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 04ac4b9840fb..56ff067f63e2 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/genalloc.h>
@@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
spin_lock_init(&ipc->cons_lock);
INIT_LIST_HEAD(&ipc->cons_list);
INIT_LIST_HEAD(&ipc->cb_msg_list);
- drmm_mutex_init(&vdev->drm, &ipc->lock);
+ ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
+ goto err_free_rx;
+ }
ivpu_ipc_reset(vdev);
return 0;
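The ivpu_ipc_init() fix is an instance of the kernel's goto-unwind idiom: every init step that can fail jumps to a label that releases only what was already set up. A standalone miniature of the shape, with hypothetical step names:

#include <stdlib.h>

static int step_a(void **p)
{
	*p = malloc(16);
	return *p ? 0 : -1;
}

static int step_b(void)
{
	return 0;	/* pretend this can fail, like drmm_mutex_init() above */
}

int init_all(void)
{
	void *a;
	int ret;

	ret = step_a(&a);
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto err_free_a;	/* mirrors the new "goto err_free_rx" */

	return 0;	/* on success the resource stays live for later use */

err_free_a:
	free(a);
	return ret;
}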
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 91bd640655ab..2e46b322c450 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -278,7 +278,7 @@ static const char *ivpu_mmu_event_to_str(u32 cmd)
case IVPU_MMU_EVT_F_VMS_FETCH:
return "Fetch of VMS caused external abort";
default:
- return "Unknown CMDQ command";
+ return "Unknown event";
}
}
@@ -286,15 +286,15 @@ static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
switch (err) {
case IVPU_MMU_CERROR_NONE:
- return "No CMDQ Error";
+ return "No error";
case IVPU_MMU_CERROR_ILL:
return "Illegal command";
case IVPU_MMU_CERROR_ABT:
- return "External abort on CMDQ read";
+ return "External abort on command queue read";
case IVPU_MMU_CERROR_ATC_INV_SYNC:
return "Sync failed to complete ATS invalidation";
default:
- return "Unknown CMDQ Error";
+ return "Unknown error";
}
}
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 7cce1c928a7f..4f5ea466731f 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/highmem.h>
@@ -58,14 +58,11 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
- /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
- pci_save_state(to_pci_dev(vdev->drm.dev));
+ ivpu_prepare_for_reset(vdev);
ret = ivpu_shutdown(vdev);
if (ret)
- ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
-
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+ ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret);
return ret;
}
@@ -74,10 +71,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
- pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+retry:
pci_restore_state(to_pci_dev(vdev->drm.dev));
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
-retry:
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
@@ -100,6 +97,7 @@ err_mmu_disable:
ivpu_mmu_disable(vdev);
err_power_down:
ivpu_hw_power_down(vdev);
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
if (!ivpu_fw_is_cold_boot(vdev)) {
ivpu_pm_prepare_cold_boot(vdev);
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 1fbc9b921c4f..736c2eb8c0f3 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
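
The speakup fix above is the classic bounded-copy guard: the loop must stop at
the last writable slot of the destination as well as at the logical end of the
input. A self-contained sketch of the same guard (buffer size and input are
illustrative):

#include <assert.h>
#include <string.h>

int main(void)
{
	char src[] = "a-long-word-that-exceeds-the-buffer";
	char buf[8];
	size_t cnt = 0, i = 0;

	/* Mirrors "tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1": stop on
	 * either the end of the input or the last writable slot. */
	while (src[i] != '\0' && cnt < sizeof(buf) - 1)
		buf[cnt++] = src[i++];
	buf[cnt] = '\0';

	assert(cnt == sizeof(buf) - 1);
	assert(strlen(buf) < sizeof(buf));
	return 0;
}
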
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ff1689bb3124..e3a7c2aedd5f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -469,6 +469,9 @@ config ACPI_REDUCED_HARDWARE_ONLY
If you are unsure what to do, do not enable this option.
+config ACPI_NHLT
+ bool
+
source "drivers/acpi/nfit/Kconfig"
source "drivers/acpi/numa/Kconfig"
source "drivers/acpi/apei/Kconfig"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 8cc8c0d9c873..39ea5cfa8326 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -14,7 +14,6 @@ tables.o: $(src)/../../include/$(CONFIG_ACPI_CUSTOM_DSDT_FILE) ;
endif
obj-$(CONFIG_ACPI) += tables.o
-obj-$(CONFIG_X86) += blacklist.o
#
# ACPI Core Subsystem (Interpreter)
@@ -46,7 +45,6 @@ acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
-acpi-$(CONFIG_PCI) += acpi_lpss.o
acpi-y += acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
@@ -55,10 +53,6 @@ acpi-y += event.o
acpi-y += evged.o
acpi-y += sysfs.o
acpi-y += property.o
-acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
-acpi-$(CONFIG_X86) += x86/apple.o
-acpi-$(CONFIG_X86) += x86/utils.o
-acpi-$(CONFIG_X86) += x86/s2idle.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_FPDT) += acpi_fpdt.o
@@ -93,6 +87,7 @@ obj-$(CONFIG_ACPI_THERMAL_LIB) += thermal_lib.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_PLATFORM_PROFILE) += platform_profile.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
+obj-$(CONFIG_ACPI_NHLT) += nhlt.o
obj-$(CONFIG_ACPI_NUMA) += numa/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
@@ -132,3 +127,4 @@ obj-$(CONFIG_ARM64) += arm64/
obj-$(CONFIG_ACPI_VIOT) += viot.o
obj-$(CONFIG_RISCV) += riscv/
+obj-$(CONFIG_X86) += x86/
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 30f3fc13c29d..8d18af396de9 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -D_LINUX -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+CFLAGS_tbfind.o += $(call cc-disable-warning, stringop-truncation)
# use acpi.o to put all files here into acpi.o modparam namespace
obj-y += acpi.o
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 82563b44af35..02012168a087 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -547,7 +547,7 @@ struct acpi_field_info {
struct acpi_ged_handler_info {
struct acpi_ged_handler_info *next;
- u32 int_id; /* The interrupt ID that triggers the execution ofthe evt_method. */
+ u32 int_id; /* The interrupt ID that triggers the execution of the evt_method. */
struct acpi_namespace_node *evt_method; /* The _EVT method to be executed when an interrupt with ID = int_ID is received */
};
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 1bdfeee5d7c5..8fc02946d3cd 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -48,7 +48,7 @@
u8 descriptor_type; /* To differentiate various internal objs */\
u8 type; /* acpi_object_type */\
u16 reference_count; /* For object deletion management */\
- u8 flags;
+ u8 flags
/*
* Note: There are 3 bytes available here before the
* next natural alignment boundary (for both 32/64 cases)
@@ -71,10 +71,12 @@
*****************************************************************************/
struct acpi_object_common {
-ACPI_OBJECT_COMMON_HEADER};
+ ACPI_OBJECT_COMMON_HEADER;
+};
struct acpi_object_integer {
- ACPI_OBJECT_COMMON_HEADER u8 fill[3]; /* Prevent warning on some compilers */
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 fill[3]; /* Prevent warning on some compilers */
u64 value;
};
@@ -86,23 +88,26 @@ struct acpi_object_integer {
*/
#define ACPI_COMMON_BUFFER_INFO(_type) \
_type *pointer; \
- u32 length;
+ u32 length
/* Null terminated, ASCII characters only */
struct acpi_object_string {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_BUFFER_INFO(char); /* String in AML stream or allocated string */
};
struct acpi_object_buffer {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(u8) /* Buffer in AML stream or allocated buffer */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_BUFFER_INFO(u8); /* Buffer in AML stream or allocated buffer */
u32 aml_length;
u8 *aml_start;
struct acpi_namespace_node *node; /* Link back to parent node */
};
struct acpi_object_package {
- ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Link back to parent node */
+ ACPI_OBJECT_COMMON_HEADER;
+ struct acpi_namespace_node *node; /* Link back to parent node */
union acpi_operand_object **elements; /* Array of pointers to acpi_objects */
u8 *aml_start;
u32 aml_length;
@@ -116,11 +121,13 @@ struct acpi_object_package {
*****************************************************************************/
struct acpi_object_event {
- ACPI_OBJECT_COMMON_HEADER acpi_semaphore os_semaphore; /* Actual OS synchronization object */
+ ACPI_OBJECT_COMMON_HEADER;
+ acpi_semaphore os_semaphore; /* Actual OS synchronization object */
};
struct acpi_object_mutex {
- ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 sync_level; /* 0-15, specified in Mutex() call */
u16 acquisition_depth; /* Allow multiple Acquires, same thread */
acpi_mutex os_mutex; /* Actual OS synchronization object */
acpi_thread_id thread_id; /* Current owner of the mutex */
@@ -132,7 +139,8 @@ struct acpi_object_mutex {
};
struct acpi_object_region {
- ACPI_OBJECT_COMMON_HEADER u8 space_id;
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 space_id;
struct acpi_namespace_node *node; /* Containing namespace node */
union acpi_operand_object *handler; /* Handler for region access */
union acpi_operand_object *next;
@@ -142,7 +150,8 @@ struct acpi_object_region {
};
struct acpi_object_method {
- ACPI_OBJECT_COMMON_HEADER u8 info_flags;
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 info_flags;
u8 param_count;
u8 sync_level;
union acpi_operand_object *mutex;
@@ -178,33 +187,43 @@ struct acpi_object_method {
*/
#define ACPI_COMMON_NOTIFY_INFO \
union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\
- union acpi_operand_object *handler; /* Handler for Address space */
+ union acpi_operand_object *handler /* Handler for Address space */
/* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
struct acpi_object_notify_common {
-ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_NOTIFY_INFO;
+};
struct acpi_object_device {
- ACPI_OBJECT_COMMON_HEADER
- ACPI_COMMON_NOTIFY_INFO struct acpi_gpe_block_info *gpe_block;
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_NOTIFY_INFO;
+ struct acpi_gpe_block_info *gpe_block;
};
struct acpi_object_power_resource {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO u32 system_level;
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_NOTIFY_INFO;
+ u32 system_level;
u32 resource_order;
};
struct acpi_object_processor {
- ACPI_OBJECT_COMMON_HEADER
- /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */
+ ACPI_OBJECT_COMMON_HEADER;
+
+ /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */
+
u8 proc_id;
u8 length;
- ACPI_COMMON_NOTIFY_INFO acpi_io_address address;
+ ACPI_COMMON_NOTIFY_INFO;
+ acpi_io_address address;
};
struct acpi_object_thermal_zone {
-ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_NOTIFY_INFO;
+};
/******************************************************************************
*
@@ -226,17 +245,21 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u32 base_byte_offset; /* Byte offset within containing object */\
u32 value; /* Value to store into the Bank or Index register */\
u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
- u8 access_length; /* For serial regions/fields */
+ u8 access_length /* For serial regions/fields */
/* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
struct acpi_object_field_common {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+ union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
};
struct acpi_object_region_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+ u16 resource_length;
union acpi_operand_object *region_obj; /* Containing op_region object */
u8 *resource_buffer; /* resource_template for serial regions/fields */
u16 pin_number_index; /* Index relative to previous Connection/Template */
@@ -244,16 +267,20 @@ struct acpi_object_region_field {
};
struct acpi_object_bank_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+ union acpi_operand_object *region_obj; /* Containing op_region object */
union acpi_operand_object *bank_obj; /* bank_select Register object */
};
struct acpi_object_index_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO
- /*
- * No "RegionObj" pointer needed since the Index and Data registers
- * are each field definitions unto themselves.
- */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+
+ /*
+ * No "RegionObj" pointer needed since the Index and Data registers
+ * are each field definitions unto themselves.
+ */
union acpi_operand_object *index_obj; /* Index register */
union acpi_operand_object *data_obj; /* Data register */
};
@@ -261,7 +288,9 @@ struct acpi_object_index_field {
/* The buffer_field is different in that it is part of a Buffer, not an op_region */
struct acpi_object_buffer_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u8 is_create_field; /* Special case for objects created by create_field() */
+ ACPI_OBJECT_COMMON_HEADER;
+ ACPI_COMMON_FIELD_INFO;
+ u8 is_create_field; /* Special case for objects created by create_field() */
union acpi_operand_object *buffer_obj; /* Containing Buffer object */
};
@@ -272,7 +301,8 @@ struct acpi_object_buffer_field {
*****************************************************************************/
struct acpi_object_notify_handler {
- ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */
+ ACPI_OBJECT_COMMON_HEADER;
+ struct acpi_namespace_node *node; /* Parent device */
u32 handler_type; /* Type: Device/System/Both */
acpi_notify_handler handler; /* Handler address */
void *context;
@@ -280,7 +310,8 @@ struct acpi_object_notify_handler {
};
struct acpi_object_addr_handler {
- ACPI_OBJECT_COMMON_HEADER u8 space_id;
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 space_id;
u8 handler_flags;
acpi_adr_space_handler handler;
struct acpi_namespace_node *node; /* Parent device */
@@ -307,7 +338,8 @@ struct acpi_object_addr_handler {
* The Reference.Class differentiates these types.
*/
struct acpi_object_reference {
- ACPI_OBJECT_COMMON_HEADER u8 class; /* Reference Class */
+ ACPI_OBJECT_COMMON_HEADER;
+ u8 class; /* Reference Class */
u8 target_type; /* Used for Index Op */
u8 resolved; /* Reference has been resolved to a value */
void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
@@ -340,7 +372,8 @@ typedef enum {
* Currently: Region and field_unit types
*/
struct acpi_object_extra {
- ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
+ ACPI_OBJECT_COMMON_HEADER;
+ struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
struct acpi_namespace_node *scope_node;
void *region_context; /* Region-specific data */
u8 *aml_start;
@@ -350,14 +383,16 @@ struct acpi_object_extra {
/* Additional data that can be attached to namespace nodes */
struct acpi_object_data {
- ACPI_OBJECT_COMMON_HEADER acpi_object_handler handler;
+ ACPI_OBJECT_COMMON_HEADER;
+ acpi_object_handler handler;
void *pointer;
};
/* Structure used when objects are cached for reuse */
struct acpi_object_cache_list {
- ACPI_OBJECT_COMMON_HEADER union acpi_operand_object *next; /* Link for object cache and internal lists */
+ ACPI_OBJECT_COMMON_HEADER;
+ union acpi_operand_object *next; /* Link for object cache and internal lists */
};
/******************************************************************************
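
The acobject.h churn above is one mechanical convention change: member-defining
macros now end without a trailing semicolon, so every use site supplies its own
and reads like an ordinary declaration. A minimal compilable illustration, with
invented names:

#include <assert.h>

#define OBJECT_COMMON_HEADER \
	unsigned char descriptor_type; \
	unsigned char type; \
	unsigned short reference_count; \
	unsigned char flags	/* no ';' here - the use site adds it */

struct object_integer {
	OBJECT_COMMON_HEADER;	/* expands to the four common members */
	unsigned char fill[3];
	unsigned long long value;
};

int main(void)
{
	struct object_integer obj = { .type = 1, .value = 42 };

	assert(obj.type == 1 && obj.value == 42);
	return 0;
}
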
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 0dbc4d88919a..38f408cf13ce 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -413,6 +413,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
gpe_event_info->dispatch.method_node = method_node;
+ walk_info->count++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Registered GPE method %s as GPE number 0x%.2X\n",
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 44267a92bce5..3c126c6d306b 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -315,23 +315,19 @@ void acpi_tb_parse_fadt(void)
ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
NULL, FALSE, TRUE, &acpi_gbl_dsdt_index);
- /* If Hardware Reduced flag is set, there is no FACS */
-
- if (!acpi_gbl_reduced_hardware) {
- if (acpi_gbl_FADT.facs) {
- acpi_tb_install_standard_table((acpi_physical_address)
- acpi_gbl_FADT.facs,
- ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
- NULL, FALSE, TRUE,
- &acpi_gbl_facs_index);
- }
- if (acpi_gbl_FADT.Xfacs) {
- acpi_tb_install_standard_table((acpi_physical_address)
- acpi_gbl_FADT.Xfacs,
- ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
- NULL, FALSE, TRUE,
- &acpi_gbl_xfacs_index);
- }
+ if (acpi_gbl_FADT.facs) {
+ acpi_tb_install_standard_table((acpi_physical_address)
+ acpi_gbl_FADT.facs,
+ ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
+ NULL, FALSE, TRUE,
+ &acpi_gbl_facs_index);
+ }
+ if (acpi_gbl_FADT.Xfacs) {
+ acpi_tb_install_standard_table((acpi_physical_address)
+ acpi_gbl_FADT.Xfacs,
+ ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
+ NULL, FALSE, TRUE,
+ &acpi_gbl_xfacs_index);
}
}
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index bb4a56e5673a..15fa68a5ea6e 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -36,12 +36,7 @@ acpi_status acpi_tb_initialize_facs(void)
{
struct acpi_table_facs *facs;
- /* If Hardware Reduced flag is set, there is no FACS */
-
- if (acpi_gbl_reduced_hardware) {
- acpi_gbl_FACS = NULL;
- return (AE_OK);
- } else if (acpi_gbl_FADT.Xfacs &&
+ if (acpi_gbl_FADT.Xfacs &&
(!acpi_gbl_FADT.facs
|| !acpi_gbl_use32_bit_facs_addresses)) {
(void)acpi_get_table_by_index(acpi_gbl_xfacs_index,
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index c5f6c85a3a09..3d71bd9245c7 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -62,7 +62,12 @@ void acpi_ut_track_stack_ptr(void)
acpi_size current_sp;
if (&current_sp < acpi_gbl_lowest_stack_pointer) {
+#pragma GCC diagnostic push
+#if defined(__GNUC__) && __GNUC__ >= 12
+#pragma GCC diagnostic ignored "-Wdangling-pointer="
+#endif
acpi_gbl_lowest_stack_pointer = &current_sp;
+#pragma GCC diagnostic pop
}
if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) {
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 01faca3a238a..9515bcfe5e97 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -851,7 +851,7 @@ err_put_table:
return rc;
}
-static void einj_remove(struct platform_device *pdev)
+static void __exit einj_remove(struct platform_device *pdev)
{
struct apei_exec_context ctx;
@@ -873,8 +873,14 @@ static void einj_remove(struct platform_device *pdev)
}
static struct platform_device *einj_dev;
-static struct platform_driver einj_driver = {
- .remove_new = einj_remove,
+/*
+ * einj_remove() lives in .exit.text. For drivers registered via
+ * platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver einj_driver __refdata = {
+ .remove_new = __exit_p(einj_remove),
.driver = {
.name = "acpi-einj",
},
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d9fa730416f1..787eca838410 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -112,6 +112,17 @@ int acpi_bus_get_status(struct acpi_device *device)
if (ACPI_FAILURE(status))
return -ENODEV;
+ if (!device->status.present && device->status.enabled) {
+ pr_info(FW_BUG "Device [%s] status [%08x]: not present and enabled\n",
+ device->pnp.bus_id, (u32)sta);
+ device->status.enabled = 0;
+ /*
+ * The status is clearly invalid, so clear the functional bit as
+ * well to avoid attempting to use the device.
+ */
+ device->status.functional = 0;
+ }
+
acpi_set_device_status(device, sta);
if (device->status.functional && !device->status.present) {
@@ -316,9 +327,14 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
+ if (IS_ENABLED(CONFIG_ACPI_THERMAL))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_OVER_16_PSTATES_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GED_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_PRMT))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_FFH))
@@ -990,25 +1006,26 @@ EXPORT_SYMBOL_GPL(acpi_driver_match_device);
-------------------------------------------------------------------------- */
/**
- * acpi_bus_register_driver - register a driver with the ACPI bus
+ * __acpi_bus_register_driver - register a driver with the ACPI bus
* @driver: driver being registered
+ * @owner: owning module/driver
*
* Registers a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and binds. Returns zero for
* success or a negative error status for failure.
*/
-int acpi_bus_register_driver(struct acpi_driver *driver)
+int __acpi_bus_register_driver(struct acpi_driver *driver, struct module *owner)
{
if (acpi_disabled)
return -ENODEV;
driver->drv.name = driver->name;
driver->drv.bus = &acpi_bus_type;
- driver->drv.owner = driver->owner;
+ driver->drv.owner = owner;
return driver_register(&driver->drv);
}
-EXPORT_SYMBOL(acpi_bus_register_driver);
+EXPORT_SYMBOL(__acpi_bus_register_driver);
/**
* acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
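
Splitting acpi_bus_register_driver() into __acpi_bus_register_driver(driver,
owner) follows the usual bus pattern where a header macro captures the caller's
module. A hedged sketch of the expected wrapper - the exact spelling in
acpi_bus.h is an assumption based on pci_register_driver(), not shown in this
hunk:

#define acpi_bus_register_driver(drv) \
	__acpi_bus_register_driver(drv, THIS_MODULE)

Existing callers then keep compiling unchanged while getting a correct
drv.owner.
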
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 4bfbe55553f4..1d857978f5f4 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -170,8 +170,8 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset & \
- GENMASK(((reg)->bit_width), 0)))
+#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+ GENMASK(((reg)->bit_width) - 1, 0))
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -686,8 +686,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (!osc_sb_cppc2_support_acked) {
pr_debug("CPPC v2 _OSC not acked\n");
- if (!cpc_supported_by_cpu())
+ if (!cpc_supported_by_cpu()) {
+ pr_debug("CPPC is not supported by the CPU\n");
return -ENODEV;
+ }
}
/* Parse the ACPI _CPC table for this CPU. */
@@ -1002,14 +1004,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
*val = 0;
+ size = GET_BIT_WIDTH(reg);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
- &val_u32, width);
+ &val_u32, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
@@ -1018,17 +1020,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = val_u32;
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_read_ffh(cpu, reg, val);
else
return acpi_os_read_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
switch (size) {
case 8:
@@ -1044,8 +1051,13 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = readq_relaxed(vaddr);
break;
default:
- pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
return -EFAULT;
}
@@ -1063,12 +1075,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ size = GET_BIT_WIDTH(reg);
+
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = GET_BIT_WIDTH(reg);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
- (u32)val, width);
+ (u32)val, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to write SystemIO port %llx\n",
reg->address);
@@ -1076,17 +1089,22 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_write_ffh(cpu, reg, val);
else
return acpi_os_write_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
-
- size = GET_BIT_WIDTH(reg);
+ val, size);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
val = MASK_VAL(reg, val);
@@ -1105,8 +1123,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
writeq_relaxed(val, vaddr);
break;
default:
- pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
ret_val = -EFAULT;
break;
}
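
The MASK_VAL() fix is easy to sanity-check in isolation: the old macro shifted
by a nonsensically masked bit_offset, while the corrected one shifts the raw
value down by bit_offset and then keeps bit_width low bits. A self-contained
userspace check (GENMASK_ULL() re-spelled locally; the register layout is
illustrative):

#include <assert.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint64_t mask_val(uint64_t val, unsigned int bit_offset,
			 unsigned int bit_width)
{
	return (val >> bit_offset) & GENMASK_ULL(bit_width - 1, 0);
}

int main(void)
{
	/* An 8-bit field at bit 4 holding 0xAB, surrounded by noise bits. */
	uint64_t raw = (0xABULL << 4) | 0xFULL | (0x3ULL << 12);

	assert(mask_val(raw, 4, 8) == 0xAB);
	return 0;
}
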
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index a7c00ef78086..34affbda295e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -88,43 +88,29 @@ static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
enum dock_callback_type cb_type)
{
struct acpi_device *adev = dd->adev;
+ acpi_hp_fixup fixup = NULL;
+ acpi_hp_uevent uevent = NULL;
+ acpi_hp_notify notify = NULL;
acpi_lock_hp_context();
- if (!adev->hp)
- goto out;
-
- if (cb_type == DOCK_CALL_FIXUP) {
- void (*fixup)(struct acpi_device *);
-
- fixup = adev->hp->fixup;
- if (fixup) {
- acpi_unlock_hp_context();
- fixup(adev);
- return;
- }
- } else if (cb_type == DOCK_CALL_UEVENT) {
- void (*uevent)(struct acpi_device *, u32);
-
- uevent = adev->hp->uevent;
- if (uevent) {
- acpi_unlock_hp_context();
- uevent(adev, event);
- return;
- }
- } else {
- int (*notify)(struct acpi_device *, u32);
-
- notify = adev->hp->notify;
- if (notify) {
- acpi_unlock_hp_context();
- notify(adev, event);
- return;
- }
+ if (adev->hp) {
+ if (cb_type == DOCK_CALL_FIXUP)
+ fixup = adev->hp->fixup;
+ else if (cb_type == DOCK_CALL_UEVENT)
+ uevent = adev->hp->uevent;
+ else
+ notify = adev->hp->notify;
}
- out:
acpi_unlock_hp_context();
+
+ if (fixup)
+ fixup(adev);
+ else if (uevent)
+ uevent(adev, event);
+ else if (notify)
+ notify(adev, event);
}
static struct dock_station *find_dock_station(acpi_handle handle)
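
The dock rework above replaces three lock/unlock/call branches with one
snapshot-then-call sequence: copy the callback pointer while holding the hp
context lock, release the lock, and only then invoke it. The same shape in a
self-contained pthread sketch (names illustrative):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t hp_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*registered_uevent)(int event);

static int seen_event;
static void record_uevent(int event) { seen_event = event; }

static void hotplug_event(int event)
{
	void (*uevent)(int) = NULL;

	pthread_mutex_lock(&hp_lock);
	uevent = registered_uevent;	/* snapshot under the lock */
	pthread_mutex_unlock(&hp_lock);

	if (uevent)			/* call with the lock released */
		uevent(event);
}

int main(void)
{
	registered_uevent = record_uevent;
	hotplug_event(3);
	assert(seen_event == 3);
	return 0;
}
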
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 654aaa53c67f..d202730fafd8 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -150,6 +150,7 @@ static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1045", 0},
{"INTC1049", 0},
{"INTC1064", 0},
+ {"INTC106B", 0},
{"INTC10A3", 0},
{"", 0},
};
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index b8187babbbbb..8023b3e23315 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -232,6 +232,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC1061", 0},
{"INTC1065", 0},
{"INTC1066", 0},
+ {"INTC106C", 0},
+ {"INTC106D", 0},
{"INTC10A4", 0},
{"INTC10A5", 0},
{"", 0},
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index b7113fa92fa6..014ada759954 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -43,6 +43,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC1064"},
{"INTC1065"},
{"INTC1066"},
+ {"INTC1068"},
+ {"INTC1069"},
+ {"INTC106A"},
+ {"INTC106B"},
+ {"INTC106C"},
+ {"INTC106D"},
{"INTC10A0"},
{"INTC10A1"},
{"INTC10A2"},
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index e7b4b4e4a55e..f89d19c922dc 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -15,6 +15,7 @@
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
{"INTC1048", }, /* Fan for Alder Lake generation */ \
{"INTC1063", }, /* Fan for Meteor Lake generation */ \
+ {"INTC106A", }, /* Fan for Lunar Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ca72a0dc5715..60c483836756 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -69,7 +69,8 @@ void acpi_debugfs_init(void);
#else
static inline void acpi_debugfs_init(void) { return; }
#endif
-#ifdef CONFIG_PCI
+
+#if defined(CONFIG_X86) && defined(CONFIG_PCI)
void acpi_lpss_init(void);
#else
static inline void acpi_lpss_init(void) {}
diff --git a/drivers/acpi/nhlt.c b/drivers/acpi/nhlt.c
new file mode 100644
index 000000000000..dc1bd0df9228
--- /dev/null
+++ b/drivers/acpi/nhlt.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2023-2024 Intel Corporation
+ *
+ * Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+ * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ */
+
+#define pr_fmt(fmt) "ACPI: NHLT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/minmax.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <acpi/nhlt.h>
+
+static struct acpi_table_nhlt *acpi_gbl_nhlt;
+
+static struct acpi_table_nhlt empty_nhlt = {
+ .header = {
+ .signature = ACPI_SIG_NHLT,
+ },
+};
+
+/**
+ * acpi_nhlt_get_gbl_table - Retrieve a pointer to the first NHLT table.
+ *
+ * If there is no NHLT in the system, acpi_gbl_nhlt will instead point to an
+ * empty table.
+ *
+ * Return: ACPI status code of the operation.
+ */
+acpi_status acpi_nhlt_get_gbl_table(void)
+{
+ acpi_status status;
+
+ status = acpi_get_table(ACPI_SIG_NHLT, 0, (struct acpi_table_header **)(&acpi_gbl_nhlt));
+ if (!acpi_gbl_nhlt)
+ acpi_gbl_nhlt = &empty_nhlt;
+ return status;
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_get_gbl_table);
+
+/**
+ * acpi_nhlt_put_gbl_table - Release the global NHLT table.
+ */
+void acpi_nhlt_put_gbl_table(void)
+{
+ acpi_put_table((struct acpi_table_header *)acpi_gbl_nhlt);
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_put_gbl_table);
+
+/**
+ * acpi_nhlt_endpoint_match - Verify if an endpoint matches criteria.
+ * @ep: the endpoint to check.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * Any of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter when matching.
+ *
+ * Return: %true if endpoint matches specified criteria or %false otherwise.
+ */
+bool acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep,
+ int link_type, int dev_type, int dir, int bus_id)
+{
+ return ep &&
+ (link_type < 0 || ep->link_type == link_type) &&
+ (dev_type < 0 || ep->device_type == dev_type) &&
+ (bus_id < 0 || ep->virtual_bus_id == bus_id) &&
+ (dir < 0 || ep->direction == dir);
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_match);
+
+/**
+ * acpi_nhlt_tb_find_endpoint - Search a NHLT table for an endpoint.
+ * @tb: the table to search.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * Any of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter during the search.
+ *
+ * Return: A pointer to endpoint matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_endpoint *
+acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb,
+ int link_type, int dev_type, int dir, int bus_id)
+{
+ struct acpi_nhlt_endpoint *ep;
+
+ for_each_nhlt_endpoint(tb, ep)
+ if (acpi_nhlt_endpoint_match(ep, link_type, dev_type, dir, bus_id))
+ return ep;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_tb_find_endpoint);
+
+/**
+ * acpi_nhlt_find_endpoint - Search all NHLT tables for an endpoint.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * Any of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter during the search.
+ *
+ * Return: A pointer to endpoint matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_endpoint *
+acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id)
+{
+ /* TODO: Currently limited to table of index 0. */
+ return acpi_nhlt_tb_find_endpoint(acpi_gbl_nhlt, link_type, dev_type, dir, bus_id);
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_find_endpoint);
+
+/**
+ * acpi_nhlt_endpoint_find_fmtcfg - Search endpoint's formats configuration space
+ * for a specific format.
+ * @ep: the endpoint to search.
+ * @ch: number of channels.
+ * @rate: samples per second.
+ * @vbps: valid bits per sample.
+ * @bps: bits per sample.
+ *
+ * Return: A pointer to format matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_format_config *
+acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep,
+ u16 ch, u32 rate, u16 vbps, u16 bps)
+{
+ struct acpi_nhlt_wave_formatext *wav;
+ struct acpi_nhlt_format_config *fmt;
+
+ for_each_nhlt_endpoint_fmtcfg(ep, fmt) {
+ wav = &fmt->format;
+
+ if (wav->valid_bits_per_sample == vbps &&
+ wav->samples_per_sec == rate &&
+ wav->bits_per_sample == bps &&
+ wav->channel_count == ch)
+ return fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_find_fmtcfg);
+
+/**
+ * acpi_nhlt_tb_find_fmtcfg - Search a NHLT table for a specific format.
+ * @tb: the table to search.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * @ch: number of channels.
+ * @rate: samples per second.
+ * @vbps: valid bits per sample.
+ * @bps: bits per sample.
+ *
+ * Any of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter during the search.
+ *
+ * Return: A pointer to format matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_format_config *
+acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb,
+ int link_type, int dev_type, int dir, int bus_id,
+ u16 ch, u32 rate, u16 vbps, u16 bps)
+{
+ struct acpi_nhlt_format_config *fmt;
+ struct acpi_nhlt_endpoint *ep;
+
+ for_each_nhlt_endpoint(tb, ep) {
+ if (!acpi_nhlt_endpoint_match(ep, link_type, dev_type, dir, bus_id))
+ continue;
+
+ fmt = acpi_nhlt_endpoint_find_fmtcfg(ep, ch, rate, vbps, bps);
+ if (fmt)
+ return fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_tb_find_fmtcfg);
+
+/**
+ * acpi_nhlt_find_fmtcfg - Search all NHLT tables for a specific format.
+ * @link_type: the hardware link type, e.g.: PDM or SSP.
+ * @dev_type: the device type.
+ * @dir: stream direction.
+ * @bus_id: the ID of virtual bus hosting the endpoint.
+ *
+ * @ch: number of channels.
+ * @rate: samples per second.
+ * @vbps: valid bits per sample.
+ * @bps: bits per sample.
+ *
+ * Any of @link_type, @dev_type, @dir or @bus_id may be set to a negative
+ * value to ignore the parameter during the search.
+ *
+ * Return: A pointer to format matching the criteria, %NULL if not found or
+ * an ERR_PTR() otherwise.
+ */
+struct acpi_nhlt_format_config *
+acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id,
+ u16 ch, u32 rate, u16 vbps, u16 bps)
+{
+ /* TODO: Currently limited to table of index 0. */
+ return acpi_nhlt_tb_find_fmtcfg(acpi_gbl_nhlt, link_type, dev_type, dir, bus_id,
+ ch, rate, vbps, bps);
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_find_fmtcfg);
+
+static bool acpi_nhlt_config_is_micdevice(struct acpi_nhlt_config *cfg)
+{
+ return cfg->capabilities_size >= sizeof(struct acpi_nhlt_micdevice_config);
+}
+
+static bool acpi_nhlt_config_is_vendor_micdevice(struct acpi_nhlt_config *cfg)
+{
+ struct acpi_nhlt_vendor_micdevice_config *devcfg = __acpi_nhlt_config_caps(cfg);
+
+ return cfg->capabilities_size >= sizeof(*devcfg) &&
+ cfg->capabilities_size == struct_size(devcfg, mics, devcfg->mics_count);
+}
+
+/**
+ * acpi_nhlt_endpoint_mic_count - Retrieve number of digital microphones for a PDM endpoint.
+ * @ep: the endpoint to return microphones count for.
+ *
+ * Return: The number of microphones, or an error code if an invalid endpoint is provided.
+ */
+int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep)
+{
+ union acpi_nhlt_device_config *devcfg;
+ struct acpi_nhlt_format_config *fmt;
+ struct acpi_nhlt_config *cfg;
+ u16 max_ch = 0;
+
+ if (!ep || ep->link_type != ACPI_NHLT_LINKTYPE_PDM)
+ return -EINVAL;
+
+ /* Find max number of channels based on formats configuration. */
+ for_each_nhlt_endpoint_fmtcfg(ep, fmt)
+ max_ch = max(fmt->format.channel_count, max_ch);
+
+ cfg = __acpi_nhlt_endpoint_config(ep);
+ devcfg = __acpi_nhlt_config_caps(cfg);
+
+	/* If @ep is not a mic array, fall back to the channel count. */
+ if (!acpi_nhlt_config_is_micdevice(cfg) ||
+ devcfg->gen.config_type != ACPI_NHLT_CONFIGTYPE_MICARRAY)
+ return max_ch;
+
+ switch (devcfg->mic.array_type) {
+ case ACPI_NHLT_ARRAYTYPE_LINEAR2_SMALL:
+ case ACPI_NHLT_ARRAYTYPE_LINEAR2_BIG:
+ return 2;
+
+ case ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO1:
+ case ACPI_NHLT_ARRAYTYPE_PLANAR4_LSHAPED:
+ case ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO2:
+ return 4;
+
+ case ACPI_NHLT_ARRAYTYPE_VENDOR:
+ if (!acpi_nhlt_config_is_vendor_micdevice(cfg))
+ return -EINVAL;
+ return devcfg->vendor_mic.mics_count;
+
+ default:
+ pr_warn("undefined mic array type: %#x\n", devcfg->mic.array_type);
+ return max_ch;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_nhlt_endpoint_mic_count);
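
For context, a hedged consumer sketch of the new NHLT API (kernel-style, not
buildable standalone; error handling abbreviated). It relies only on functions
and constants visible in this file; how a real audio driver wires this up is an
assumption:

#include <acpi/nhlt.h>

static int probe_dmic_count(void)
{
	struct acpi_nhlt_endpoint *ep;
	int num_mics;

	acpi_nhlt_get_gbl_table();

	/* -1 wildcards device type, direction and bus id, per the
	 * matching rules documented above. */
	ep = acpi_nhlt_find_endpoint(ACPI_NHLT_LINKTYPE_PDM, -1, -1, -1);
	num_mics = acpi_nhlt_endpoint_mic_count(ep);

	acpi_nhlt_put_gbl_table();
	return num_mics;	/* mic count, or -EINVAL for a bad endpoint */
}

The status of acpi_nhlt_get_gbl_table() can be ignored here because a missing
table leaves the empty fallback installed, and acpi_nhlt_endpoint_mic_count()
rejects a NULL endpoint with -EINVAL.
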
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index e45e64993c50..e3f26e71637a 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -208,16 +208,26 @@ int __init srat_disabled(void)
return acpi_numa < 0;
}
-#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+__weak int __init numa_fill_memblks(u64 start, u64 end)
+{
+ return NUMA_NO_MEMBLK;
+}
+
/*
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
* I/O localities since SRAT does not list them. I/O localities are
* not supported at this point.
*/
-void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
+static int __init acpi_parse_slit(struct acpi_table_header *table)
{
+ struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
int i, j;
+ if (!slit_valid(slit)) {
+ pr_info("SLIT table looks invalid. Not used.\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < slit->locality_count; i++) {
const int from_node = pxm_to_node(i);
@@ -234,28 +244,34 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
slit->entry[slit->locality_count * i + j]);
}
}
+
+ return 0;
}
-/*
- * Default callback for parsing of the Proximity Domain <-> Memory
- * Area mappings
- */
-int __init
-acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
+static int parsed_numa_memblks __initdata;
+
+static int __init
+acpi_parse_memory_affinity(union acpi_subtable_headers *header,
+ const unsigned long table_end)
{
+ struct acpi_srat_mem_affinity *ma;
u64 start, end;
u32 hotpluggable;
int node, pxm;
+ ma = (struct acpi_srat_mem_affinity *)header;
+
+ acpi_table_print_srat_entry(&header->common);
+
if (srat_disabled())
- goto out_err;
+ return 0;
if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
pr_err("SRAT: Unexpected header length: %d\n",
ma->header.length);
goto out_err_bad_srat;
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- goto out_err;
+ return 0;
hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);
@@ -293,11 +309,15 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));
+ parsed_numa_memblks++;
+
return 0;
+
out_err_bad_srat:
+	/* Just disable SRAT; do not fail the parse, and ignore the error. */
bad_srat();
-out_err:
- return -EINVAL;
+
+ return 0;
}
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
@@ -340,26 +360,6 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
(*fake_pxm)++;
return 0;
}
-#else
-static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
- void *arg, const unsigned long table_end)
-{
- return 0;
-}
-#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
-
-static int __init acpi_parse_slit(struct acpi_table_header *table)
-{
- struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
-
- if (!slit_valid(slit)) {
- pr_info("SLIT table looks invalid. Not used.\n");
- return -EINVAL;
- }
- acpi_numa_slit_init(slit);
-
- return 0;
-}
void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
@@ -450,24 +450,6 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header,
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
-static int __initdata parsed_numa_memblks;
-
-static int __init
-acpi_parse_memory_affinity(union acpi_subtable_headers * header,
- const unsigned long end)
-{
- struct acpi_srat_mem_affinity *memory_affinity;
-
- memory_affinity = (struct acpi_srat_mem_affinity *)header;
-
- acpi_table_print_srat_entry(&header->common);
-
- /* let architecture-dependent part to do it */
- if (!acpi_numa_memory_affinity_init(memory_affinity))
- parsed_numa_memblks++;
- return 0;
-}
-
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
struct acpi_table_srat *srat = (struct acpi_table_srat *)table;
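
The __weak numa_fill_memblks() default added above relies on standard link-time
override semantics: the weak body is the fallback, and any architecture that
provides a strong definition wins silently. A minimal standalone demonstration
(userspace types; NUMA_NO_MEMBLK re-spelled to its kernel value of -1):

#define NUMA_NO_MEMBLK (-1)

__attribute__((weak)) int numa_fill_memblks(unsigned long long start,
					    unsigned long long end)
{
	(void)start;
	(void)end;
	return NUMA_NO_MEMBLK;	/* default: nothing to fill */
}

int main(void)
{
	/* With no strong override linked in, the weak stub is called. */
	return numa_fill_memblks(0, 0x1000) == NUMA_NO_MEMBLK ? 0 : 1;
}
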
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 2b73580c9f36..80a52a4e66dd 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -31,9 +31,14 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
* not defined without a warning. For instance if any of the properties
* from different GUID appear in a property list of another, it will be
* accepted by the kernel. Firmware validation tools should catch these.
+ *
+ * References:
+ *
+ * [1] UEFI DSD Guide.
+ * https://github.com/UEFI/DSD-Guide/blob/main/src/dsd-guide.adoc
*/
static const guid_t prp_guids[] = {
- /* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
+ /* ACPI _DSD device properties GUID [1]: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01),
/* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */
@@ -53,12 +58,12 @@ static const guid_t prp_guids[] = {
0xa5, 0x61, 0x99, 0xa5, 0x18, 0x97, 0x62, 0xd0),
};
-/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
+/* ACPI _DSD data subnodes GUID [1]: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
-/* ACPI _DSD data buffer GUID: edb12dd0-363d-4085-a3d2-49522ca160c4 */
+/* ACPI _DSD data buffer GUID [1]: edb12dd0-363d-4085-a3d2-49522ca160c4 */
static const guid_t buffer_prop_guid =
GUID_INIT(0xedb12dd0, 0x363d, 0x4085,
0xa3, 0xd2, 0x49, 0x52, 0x2c, 0xa1, 0x60, 0xc4);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 59423fe9d0f2..b5bf8b81a050 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -518,6 +518,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Pro N6506MV */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506MV"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
@@ -534,6 +541,12 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
*/
static const struct dmi_system_id irq1_edge_low_force_override[] = {
{
+ /* XMG APEX 17 (M23) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxBGxx"),
+ },
+ },
+ {
/* TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
@@ -630,6 +643,18 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "X565"),
},
},
+ {
+ /* TongFang GXxHRXx/TUXEDO InfinityBook Pro Gen9 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
+ {
+ /* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ },
+ },
{ }
};
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7c157bf92695..e41fb5716bc0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -73,8 +73,7 @@ void acpi_unlock_hp_context(void)
void acpi_initialize_hp_context(struct acpi_device *adev,
struct acpi_hotplug_context *hp,
- int (*notify)(struct acpi_device *, u32),
- void (*uevent)(struct acpi_device *, u32))
+ acpi_hp_notify notify, acpi_hp_uevent uevent)
{
acpi_lock_hp_context();
hp->notify = notify;
@@ -428,7 +427,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src)
} else if (adev->flags.hotplug_notify) {
error = acpi_generic_hotplug_event(adev, src);
} else {
- int (*notify)(struct acpi_device *, u32);
+ acpi_hp_notify notify;
acpi_lock_hp_context();
notify = adev->hp ? adev->hp->notify : NULL;
@@ -1298,10 +1297,10 @@ const char *acpi_device_hid(struct acpi_device *device)
{
struct acpi_hardware_id *hid;
- if (list_empty(&device->pnp.ids))
+ hid = list_first_entry_or_null(&device->pnp.ids, struct acpi_hardware_id, list);
+ if (!hid)
return dummy_hid;
- hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
return hid->id;
}
EXPORT_SYMBOL(acpi_device_hid);
@@ -1581,12 +1580,13 @@ int acpi_iommu_fwspec_init(struct device *dev, u32 id,
struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
- int ret = iommu_fwspec_init(dev, fwnode, ops);
+ int ret;
- if (!ret)
- ret = iommu_fwspec_add_ids(dev, &id, 1);
+ ret = iommu_fwspec_init(dev, fwnode, ops);
+ if (ret)
+ return ret;
- return ret;
+ return iommu_fwspec_add_ids(dev, &id, 1);
}
static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev)
@@ -1625,12 +1625,11 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
if (!err && dev->bus)
err = iommu_probe_device(dev);
- /* Ignore all other errors apart from EPROBE_DEFER */
- if (err == -EPROBE_DEFER) {
+ if (err == -EPROBE_DEFER)
return err;
- } else if (err) {
+ if (err) {
dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
- return -ENODEV;
+ return err;
}
if (!acpi_iommu_fwspec_ops(dev))
return -ENODEV;
@@ -1671,13 +1670,14 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
acpi_arch_dma_setup(dev);
+ /* Ignore all other errors apart from EPROBE_DEFER */
ret = acpi_iommu_configure_id(dev, input_id);
if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER;
/*
* Historically this routine doesn't fail driver probing due to errors
- * in acpi_iommu_configure_id()
+ * in acpi_iommu_configure_id().
*/
arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
@@ -1843,7 +1843,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
if (dep->honor_dep)
adev->flags.honor_deps = 1;
- adev->dep_unmet++;
+ if (!dep->met)
+ adev->dep_unmet++;
}
}
}
@@ -1961,7 +1962,7 @@ bool acpi_device_is_present(const struct acpi_device *adev)
bool acpi_device_is_enabled(const struct acpi_device *adev)
{
- return adev->status.present && adev->status.enabled;
+ return adev->status.enabled;
}
static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 9fdcc620c652..2cc3821b2b16 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -499,6 +499,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Lenovo Slim 7 16ARH7 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82UX"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Lenovo ThinkPad X131e (3371 AMD version) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/acpi/x86/Makefile b/drivers/acpi/x86/Makefile
new file mode 100644
index 000000000000..63c99509ed9d
--- /dev/null
+++ b/drivers/acpi/x86/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_ACPI) += acpi-x86.o
+acpi-x86-y += apple.o
+acpi-x86-y += cmos_rtc.o
+acpi-x86-$(CONFIG_PCI) += lpss.o
+acpi-x86-y += s2idle.o
+acpi-x86-y += utils.o
+
+obj-$(CONFIG_X86) += blacklist.o
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/x86/blacklist.c
index a558d24fb788..55214d0a12b1 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/x86/blacklist.c
@@ -17,7 +17,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
-#include "internal.h"
+#include "../internal.h"
#ifdef CONFIG_DMI
static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/x86/cmos_rtc.c
index 9b55d1593d16..51643ff6fe5f 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/x86/cmos_rtc.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/mc146818rtc.h>
-#include "internal.h"
+#include "../internal.h"
static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
{ "PNP0B00" },
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/x86/lpss.c
index 04e273167e92..148e29c2c526 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/x86/lpss.c
@@ -25,7 +25,7 @@
#include <linux/suspend.h>
#include <linux/delay.h>
-#include "internal.h"
+#include "../internal.h"
#ifdef CONFIG_X86_INTEL_LPSS
@@ -325,6 +325,7 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
static const struct property_entry bsw_spi_properties[] = {
PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
+ PROPERTY_ENTRY_U32("num-cs", 2),
{ }
};
@@ -886,10 +887,8 @@ static int acpi_lpss_activate(struct device *dev)
if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
lpss_deassert_reset(pdata);
-#ifdef CONFIG_PM
if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
acpi_lpss_save_ctx(dev, pdata);
-#endif
return 0;
}
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index cd84af23f7ea..dd0b40b9bbe8 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -492,16 +492,14 @@ static int lps0_device_attach(struct acpi_device *adev,
unsigned int func_mask;
/*
- * Avoid evaluating the same _DSM function for two
- * different UUIDs and prioritize the MSFT one.
+ * Log a message if the _DSM function sets for two
+ * different UUIDs overlap.
*/
func_mask = lps0_dsm_func_mask & lps0_dsm_func_mask_microsoft;
- if (func_mask) {
+ if (func_mask)
acpi_handle_info(adev->handle,
"Duplicate LPS0 _DSM functions (mask: 0x%x)\n",
func_mask);
- lps0_dsm_func_mask &= ~func_mask;
- }
}
}
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 90c3d2eab9e9..7dca73417e2b 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -101,6 +101,15 @@ static const struct override_status_id override_status_ids[] = {
}),
/*
+ * The Dell XPS 15 9550 has an SMO8810 accelerometer /
+ * HDD freefall sensor which is wrongly marked as not present.
+ */
+ PRESENT_ENTRY_HID("SMO8810", "1", SKYLAKE, {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9550"),
+ }),
+
+ /*
* The GPD win BIOS dated 20170221 has disabled the accelerometer, the
* drivers sometimes cause crashes under Windows and this is how the
* manufacturer has solved this :| The DMI match may not seem unique,
@@ -260,9 +269,10 @@ bool force_storage_d3(void)
#define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0)
#define ACPI_QUIRK_UART1_SKIP BIT(1)
#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(2)
-#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(3)
-#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(4)
-#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(5)
+#define ACPI_QUIRK_PNP_UART1_SKIP BIT(3)
+#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(4)
+#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(5)
+#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(6)
static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
@@ -342,6 +352,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_PNP_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
@@ -440,14 +451,18 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
if (ret)
return 0;
- /* to not match on PNP enumerated debug UARTs */
- if (!dev_is_platform(controller_parent))
- return 0;
-
dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
if (dmi_id)
quirks = (unsigned long)dmi_id->driver_data;
+ if (!dev_is_platform(controller_parent)) {
+ /* PNP enumerated UARTs */
+ if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
+ *skip = true;
+
+ return 0;
+ }
+
if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1)
*skip = true;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bad28cf42010..dd6923d37931 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
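
The binder hardening adds an alignment guard alongside the existing size
checks. IS_ALIGNED() only behaves for power-of-two alignments; a
self-contained check of the predicate as used here (macro re-spelled from
include/linux/align.h, simplified):

#include <assert.h>
#include <stddef.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	assert(IS_ALIGNED(0u, sizeof(unsigned int)));
	assert(IS_ALIGNED(8u, sizeof(unsigned int)));
	assert(!IS_ALIGNED(6u, sizeof(unsigned int)));	/* rejected offset */
	return 0;
}
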
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 562302e2e57c..6548f10e61d9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -666,6 +666,87 @@ static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+static char *ahci_mask_port_map;
+module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
+MODULE_PARM_DESC(mask_port_map,
+	"32-bit port map masks to ignore controller ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static void ahci_apply_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv, char *mask_s)
+{
+ unsigned int mask;
+
+ if (kstrtouint(mask_s, 0, &mask)) {
+ dev_err(dev, "Invalid port map mask\n");
+ return;
+ }
+
+ hpriv->mask_port_map = mask;
+}
+
+static void ahci_get_port_map_mask(struct device *dev,
+ struct ahci_host_priv *hpriv)
+{
+ char *param, *end, *str, *mask_s;
+ char *name;
+
+ if (!strlen(ahci_mask_port_map))
+ return;
+
+ str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ if (!str)
+ return;
+
+ /* Handle single mask case */
+ if (!strchr(str, '=')) {
+ ahci_apply_port_map_mask(dev, hpriv, str);
+ goto free;
+ }
+
+ /*
+ * Mask list case: parse the parameter to apply the mask only if
+ * the device name matches.
+ */
+ param = str;
+ end = param + strlen(param);
+ while (param && param < end && *param) {
+ name = param;
+ param = strchr(name, '=');
+ if (!param)
+ break;
+
+ *param = '\0';
+ param++;
+ if (param >= end)
+ break;
+
+ if (strcmp(dev_name(dev), name) != 0) {
+ param = strchr(param, ',');
+ if (param)
+ param++;
+ continue;
+ }
+
+ mask_s = param;
+ param = strchr(mask_s, ',');
+ if (param) {
+ *param = '\0';
+ param++;
+ }
+
+ ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ }
+
+free:
+ kfree(str);
+}
+
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
@@ -688,6 +769,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
}
+ /* Handle port map masks passed as module parameter. */
+ if (ahci_mask_port_map)
+ ahci_get_port_map_mask(&pdev->dev, hpriv);
+
ahci_save_initial_config(&pdev->dev, hpriv);
}
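
Usage of the new parameter follows directly from the parser above; both
accepted forms, with illustrative PCI device names:

	modprobe ahci mask_port_map=0x1
	modprobe ahci mask_port_map=0000:00:17.0=0x1,0000:01:00.0=0x3

The same values work as ahci.mask_port_map=... on the kernel command line. A
string without '=' is applied to every AHCI controller; otherwise each mask is
applied only to the controller whose domain:bus:dev.func name matches.
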
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index be3412cdb22e..c449d60d9bb9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2539,7 +2539,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
bool cdl_enabled;
u64 val;
- if (ata_id_major_version(dev->id) < 12)
+ if (ata_id_major_version(dev->id) < 11)
goto not_supported;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
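
Relaxing the floor from 12 to 11 admits ACS-4 drives: going by the ATA major-version numbering, ACS-4 reports version 11 and ACS-5 (where Command Duration Limits were standardized) reports 12, yet some ACS-4 drives already implement CDL. The log-page checks that follow still require the device to actually advertise support, so the version test is only a cheap early-out:

	/* ACS-4 (major version 11) drives may implement CDL too */
	if (ata_id_major_version(dev->id) < 11)
		goto not_supported;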
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2f4c58837641..e954976891a9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4745,7 +4745,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
* bail out.
*/
if (ap->pflags & ATA_PFLAG_SUSPENDED)
- goto unlock;
+ goto unlock_ap;
if (!sdev)
continue;
@@ -4758,7 +4758,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
if (do_resume) {
ret = scsi_resume_device(sdev);
if (ret == -EWOULDBLOCK)
- goto unlock;
+ goto unlock_scan;
dev->flags &= ~ATA_DFLAG_RESUMING;
}
ret = scsi_rescan_device(sdev);
@@ -4766,12 +4766,13 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_lock_irqsave(ap->lock, flags);
if (ret)
- goto unlock;
+ goto unlock_ap;
}
}
-unlock:
+unlock_ap:
spin_unlock_irqrestore(ap->lock, flags);
+unlock_scan:
mutex_unlock(&ap->scsi_scan_mutex);
/* Reschedule with a delay if scsi_rescan_device() returned an error */
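
The split labels matter because the two bail-out points hold different locks: the ATA_PFLAG_SUSPENDED check runs under ap->lock, while scsi_resume_device() can sleep and is therefore called after the spinlock has been dropped, so the old shared label would have released a spinlock that was no longer held on the -EWOULDBLOCK path. The resulting shape, reduced to a sketch with generic condition names:

	spin_lock_irqsave(ap->lock, flags);
	if (suspended)
		goto unlock_ap;			/* ap->lock still held */
	spin_unlock_irqrestore(ap->lock, flags);

	if (resume_would_block)
		goto unlock_scan;		/* only the mutex is held */

	spin_lock_irqsave(ap->lock, flags);
unlock_ap:
	spin_unlock_irqrestore(ap->lock, flags);
unlock_scan:
	mutex_unlock(&ap->scsi_scan_mutex);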
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 151d95f96b11..69d2138d7efb 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -16,6 +16,9 @@ menuconfig AUXDISPLAY
if AUXDISPLAY
+#
+# Character LCD section
+#
config CHARLCD
tristate "Character LCD core support" if COMPILE_TEST
help
@@ -25,12 +28,6 @@ config CHARLCD
This is some character LCD core interface that multiple drivers can
use.
-config LINEDISP
- tristate "Character line display core support" if COMPILE_TEST
- help
- This is the core support for single-line character displays, to be
- selected by drivers that use it.
-
config HD44780_COMMON
tristate "Common functions for HD44780 (and compatibles) LCD displays" if COMPILE_TEST
select CHARLCD
@@ -52,145 +49,6 @@ config HD44780
kernel and started at boot.
If you don't understand what all this is about, say N.
-config KS0108
- tristate "KS0108 LCD Controller"
- depends on PARPORT_PC
- default n
- help
- If you have a LCD controlled by one or more KS0108
- controllers, say Y. You will need also another more specific
- driver for your LCD.
-
- Depends on Parallel Port support. If you say Y at
- parport, you will be able to compile this as a module (M)
- and built-in as well (Y).
-
- To compile this as a module, choose M here:
- the module will be called ks0108.
-
- If unsure, say N.
-
-config KS0108_PORT
- hex "Parallel port where the LCD is connected"
- depends on KS0108
- default 0x378
- help
- The address of the parallel port where the LCD is connected.
-
- The first standard parallel port address is 0x378.
- The second standard parallel port address is 0x278.
- The third standard parallel port address is 0x3BC.
-
- You can specify a different address if you need.
-
- If you don't know what I'm talking about, load the parport module,
- and execute "dmesg" or "cat /proc/ioports". You can see there how
- many parallel ports are present and which address each one has.
-
- Usually you only need to use 0x378.
-
- If you compile this as a module, you can still override this
- using the module parameters.
-
-config KS0108_DELAY
- int "Delay between each control writing (microseconds)"
- depends on KS0108
- default "2"
- help
- Amount of time the ks0108 should wait between each control write
- to the parallel port.
-
- If your LCD seems to miss random writings, increment this.
-
- If you don't know what I'm talking about, ignore it.
-
- If you compile this as a module, you can still override this
- value using the module parameters.
-
-config CFAG12864B
- tristate "CFAG12864B LCD"
- depends on X86
- depends on FB
- depends on KS0108
- select FB_SYSMEM_HELPERS
- default n
- help
- If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series,
- say Y. You also need the ks0108 LCD Controller driver.
-
- For help about how to wire your LCD to the parallel port,
- check Documentation/admin-guide/auxdisplay/cfag12864b.rst
-
- Depends on the x86 arch and the framebuffer support.
-
- The LCD framebuffer driver can be attached to a console.
- It will work fine. However, you can't attach it to the fbdev driver
- of the xorg server.
-
- To compile this as a module, choose M here:
- the modules will be called cfag12864b and cfag12864bfb.
-
- If unsure, say N.
-
-config CFAG12864B_RATE
- int "Refresh rate (hertz)"
- depends on CFAG12864B
- default "20"
- help
- Refresh rate of the LCD.
-
- As the LCD is not memory mapped, the driver has to make the work by
- software. This means you should be careful setting this value higher.
- If your CPUs are really slow or you feel the system is slowed down,
- decrease the value.
-
- Be careful modifying this value to a very high value:
- You can freeze the computer, or the LCD maybe can't draw as fast as you
- are requesting.
-
- If you don't know what I'm talking about, ignore it.
-
- If you compile this as a module, you can still override this
- value using the module parameters.
-
-config IMG_ASCII_LCD
- tristate "Imagination Technologies ASCII LCD Display"
- depends on HAS_IOMEM
- default y if MIPS_MALTA
- select MFD_SYSCON
- select LINEDISP
- help
- Enable this to support the simple ASCII LCD displays found on
- development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
- from Imagination Technologies.
-
-config HT16K33
- tristate "Holtek Ht16K33 LED controller with keyscan"
- depends on FB && I2C && INPUT
- select FB_SYSMEM_HELPERS
- select INPUT_MATRIXKMAP
- select FB_BACKLIGHT
- select NEW_LEDS
- select LEDS_CLASS
- select LINEDISP
- help
- Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
- LED controller driver with keyscan.
-
-config MAX6959
- tristate "Maxim MAX6958/6959 7-segment LED controller"
- depends on I2C
- select REGMAP_I2C
- select LINEDISP
- help
- If you say yes here you get support for the following Maxim chips
- (I2C 7-segment LED display controller):
- - MAX6958
- - MAX6959 (input support)
-
- This driver can also be built as a module. If so, the module
- will be called max6959.
-
config LCD2S
tristate "lcd2s 20x4 character display over I2C console"
depends on I2C
@@ -201,27 +59,6 @@ config LCD2S
is a simple single color character display. You have to connect it
to an I2C bus.
-config ARM_CHARLCD
- bool "ARM Ltd. Character LCD Driver"
- depends on PLAT_VERSATILE
- help
- This is a driver for the character LCD found on the ARM Ltd.
- Versatile and RealView Platform Baseboards. It doesn't do
- very much more than display the text "ARM Linux" on the first
- line and the Linux version on the second line, but that's
- still useful.
-
-config SEG_LED_GPIO
- tristate "Generic 7-segment LED display"
- depends on GPIOLIB || COMPILE_TEST
- select LINEDISP
- help
- This driver supports a generic 7-segment LED display made up
- of GPIO pins connected to the individual segments.
-
- This driver can also be built as a module. If so, the module
- will be called seg-led-gpio.
-
menuconfig PARPORT_PANEL
tristate "Parallel port LCD/Keypad Panel support"
depends on PARPORT
@@ -480,7 +317,6 @@ endif # PARPORT_PANEL
config PANEL_CHANGE_MESSAGE
bool "Change LCD initialization message ?"
depends on CHARLCD
- default "n"
help
This allows you to replace the boot message indicating the kernel version
and the driver version with a custom message. This is useful on appliances
@@ -529,8 +365,184 @@ choice
endchoice
+#
+# Samsung KS0108 LCD controller section
+#
+config KS0108
+ tristate "KS0108 LCD Controller"
+ depends on PARPORT_PC
+ help
+ If you have an LCD controlled by one or more KS0108
+ controllers, say Y. You will also need another, more specific
+ driver for your LCD.
+
+ Depends on parallel port support. If you say Y to
+ parport, you will be able to compile this as a module (M)
+ or build it in (Y).
+
+ To compile this as a module, choose M here:
+ the module will be called ks0108.
+
+ If unsure, say N.
+
+config KS0108_PORT
+ hex "Parallel port where the LCD is connected"
+ depends on KS0108
+ default 0x378
+ help
+ The address of the parallel port where the LCD is connected.
+
+ The first standard parallel port address is 0x378.
+ The second standard parallel port address is 0x278.
+ The third standard parallel port address is 0x3BC.
+
+ You can specify a different address if you need.
+
+ If you don't know what I'm talking about, load the parport module,
+ and execute "dmesg" or "cat /proc/ioports". You can see there how
+ many parallel ports are present and which address each one has.
+
+ Usually you only need to use 0x378.
+
+ If you compile this as a module, you can still override this
+ using the module parameters.
+
+config KS0108_DELAY
+ int "Delay between each control writing (microseconds)"
+ depends on KS0108
+ default "2"
+ help
+ Amount of time the ks0108 should wait between each control write
+ to the parallel port.
+
+ If your LCD seems to randomly miss writes, increase this.
+
+ If you don't know what I'm talking about, ignore it.
+
+ If you compile this as a module, you can still override this
+ value using the module parameters.
+
+config CFAG12864B
+ tristate "CFAG12864B LCD"
+ depends on X86
+ depends on FB
+ depends on KS0108
+ select FB_SYSMEM_HELPERS
+ help
+ If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series,
+ say Y. You also need the ks0108 LCD Controller driver.
+
+ For help about how to wire your LCD to the parallel port,
+ check Documentation/admin-guide/auxdisplay/cfag12864b.rst
+
+ Depends on the x86 arch and framebuffer support.
+
+ The LCD framebuffer driver can be attached to a console.
+ It will work fine. However, you can't attach it to the fbdev driver
+ of the xorg server.
+
+ To compile this as a module, choose M here:
+ the modules will be called cfag12864b and cfag12864bfb.
+
+ If unsure, say N.
+
+config CFAG12864B_RATE
+ int "Refresh rate (hertz)"
+ depends on CFAG12864B
+ default "20"
+ help
+ Refresh rate of the LCD.
+
+ As the LCD is not memory mapped, the driver has to do the work in
+ software. This means you should be careful about setting this value too high.
+ If your CPUs are really slow or you feel the system is slowed down,
+ decrease the value.
+
+ Be careful about setting this to a very high value:
+ you can freeze the computer, or the LCD may not be able to draw as
+ fast as you are requesting.
+
+ If you don't know what I'm talking about, ignore it.
+
+ If you compile this as a module, you can still override this
+ value using the module parameters.
+
+#
+# Single character line display section
+#
+config LINEDISP
+ tristate "Character line display core support" if COMPILE_TEST
+ help
+ This is the core support for single-line character displays, to be
+ selected by drivers that use it.
+
+config IMG_ASCII_LCD
+ tristate "Imagination Technologies ASCII LCD Display"
+ depends on HAS_IOMEM
+ default y if MIPS_MALTA
+ select MFD_SYSCON
+ select LINEDISP
+ help
+ Enable this to support the simple ASCII LCD displays found on
+ development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
+ from Imagination Technologies.
+
+config HT16K33
+ tristate "Holtek Ht16K33 LED controller with keyscan"
+ depends on FB && I2C && INPUT
+ select FB_SYSMEM_HELPERS
+ select INPUT_MATRIXKMAP
+ select FB_BACKLIGHT
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LINEDISP
+ help
+ Say yes here to add support for the Holtek HT16K33, a RAM-mapping
+ 16*8 LED controller driver with keyscan.
+
+config MAX6959
+ tristate "Maxim MAX6958/6959 7-segment LED controller"
+ depends on I2C
+ select REGMAP_I2C
+ select LINEDISP
+ help
+ If you say yes here you get support for the following Maxim chips
+ (I2C 7-segment LED display controller):
+ - MAX6958
+ - MAX6959 (input support)
+
+ This driver can also be built as a module. If so, the module
+ will be called max6959.
+
+config SEG_LED_GPIO
+ tristate "Generic 7-segment LED display"
+ depends on GPIOLIB || COMPILE_TEST
+ select LINEDISP
+ help
+ This driver supports a generic 7-segment LED display made up
+ of GPIO pins connected to the individual segments.
+
+ This driver can also be built as a module. If so, the module
+ will be called seg-led-gpio.
+
+#
+# Character LCD with non-conforming interface section
+#
+config ARM_CHARLCD
+ bool "ARM Ltd. Character LCD Driver"
+ depends on PLAT_VERSATILE
+ help
+ This is a driver for the character LCD found on the ARM Ltd.
+ Versatile and RealView Platform Baseboards. It doesn't do
+ very much more than display the text "ARM Linux" on the first
+ line and the Linux version on the second line, but that's
+ still useful.
+
endif # AUXDISPLAY
+#
+# Deprecated options
+#
config PANEL
tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)"
depends on PARPORT
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 4a8ea41b0550..f5c13ed1cd4f 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -3,16 +3,16 @@
# Makefile for the kernel auxiliary displays device drivers.
#
-obj-$(CONFIG_CHARLCD) += charlcd.o
-obj-$(CONFIG_HD44780_COMMON) += hd44780_common.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
-obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
-obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
+obj-$(CONFIG_CHARLCD) += charlcd.o
+obj-$(CONFIG_HD44780_COMMON) += hd44780_common.o
obj-$(CONFIG_HD44780) += hd44780.o
obj-$(CONFIG_HT16K33) += ht16k33.o
-obj-$(CONFIG_PARPORT_PANEL) += panel.o
+obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
+obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_LCD2S) += lcd2s.o
obj-$(CONFIG_LINEDISP) += line-display.o
obj-$(CONFIG_MAX6959) += max6959.o
+obj-$(CONFIG_PARPORT_PANEL) += panel.o
obj-$(CONFIG_SEG_LED_GPIO) += seg-led-gpio.o
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 6d309e4971b6..bb9463814454 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -17,7 +17,9 @@
#include <linux/uaccess.h>
#include <linux/workqueue.h>
+#ifndef CONFIG_PANEL_BOOT_MESSAGE
#include <generated/utsrelease.h>
+#endif
#include "charlcd.h"
@@ -678,4 +680,5 @@ int charlcd_unregister(struct charlcd *lcd)
}
EXPORT_SYMBOL_GPL(charlcd_unregister);
+MODULE_DESCRIPTION("Character LCD core support");
MODULE_LICENSE("GPL");
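
UTS_RELEASE is only needed for the default boot message, so guarding the include avoids rebuilding charlcd.c on every kernel version bump when CONFIG_PANEL_BOOT_MESSAGE supplies a custom string instead. The selection logic in charlcd.c is roughly (paraphrased, not quoted from the file):

	#ifdef CONFIG_PANEL_BOOT_MESSAGE
	#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
	#else
	#define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n"
	#endif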
diff --git a/drivers/auxdisplay/seg-led-gpio.c b/drivers/auxdisplay/seg-led-gpio.c
index 35a8dbb1e9d2..183ab3011cbb 100644
--- a/drivers/auxdisplay/seg-led-gpio.c
+++ b/drivers/auxdisplay/seg-led-gpio.c
@@ -81,14 +81,12 @@ static int seg_led_probe(struct platform_device *pdev)
return linedisp_register(&priv->linedisp, dev, 1, &seg_led_linedisp_ops);
}
-static int seg_led_remove(struct platform_device *pdev)
+static void seg_led_remove(struct platform_device *pdev)
{
struct seg_led_priv *priv = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&priv->work);
linedisp_unregister(&priv->linedisp);
-
- return 0;
}
static const struct of_device_id seg_led_of_match[] = {
@@ -99,7 +97,7 @@ MODULE_DEVICE_TABLE(of, seg_led_of_match);
static struct platform_driver seg_led_driver = {
.probe = seg_led_probe,
- .remove = seg_led_remove,
+ .remove_new = seg_led_remove,
.driver = {
.name = "seg-led-gpio",
.of_match_table = seg_led_of_match,
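
This is part of the tree-wide move to the void-returning platform remove callback: the driver core could do nothing useful with the old int return value, so .remove_new makes the contract explicit. The conversion pattern, as applied here:

	/* before: returned int, which the core effectively ignored */
	static int  foo_remove(struct platform_device *pdev);
	/* after: */
	static void foo_remove(struct platform_device *pdev);
	/* ...with .remove_new = foo_remove in the platform_driver */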
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 024b78a0cfc1..0248912ff687 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -22,7 +22,7 @@
#include <linux/units.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/thermal_pressure.h>
+#include <trace/events/hw_pressure.h>
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
@@ -160,26 +160,26 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
-DEFINE_PER_CPU(unsigned long, thermal_pressure);
+DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
- * topology_update_thermal_pressure() - Update thermal pressure for CPUs
+ * topology_update_hw_pressure() - Update HW pressure for CPUs
* @cpus : The related CPUs for which capacity has been reduced
* @capped_freq : The maximum allowed frequency that CPUs can run at
*
- * Update the value of thermal pressure for all @cpus in the mask. The
+ * Update the value of HW pressure for all @cpus in the mask. The
* cpumask should include all (online+offline) affected CPUs, to avoid
* operating on stale data when hot-plug is used for some CPUs. The
* @capped_freq reflects the currently allowed max CPUs frequency due to
- * thermal capping. It might be also a boost frequency value, which is bigger
+ * HW capping. It might also be a boost frequency value, which is bigger
* than the internal 'capacity_freq_ref' max frequency. In such case the
* pressure value should simply be removed, since this is an indication that
- * there is no thermal throttling. The @capped_freq must be provided in kHz.
+ * there is no HW throttling. The @capped_freq must be provided in kHz.
*/
-void topology_update_thermal_pressure(const struct cpumask *cpus,
+void topology_update_hw_pressure(const struct cpumask *cpus,
unsigned long capped_freq)
{
- unsigned long max_capacity, capacity, th_pressure;
+ unsigned long max_capacity, capacity, hw_pressure;
u32 max_freq;
int cpu;
@@ -189,21 +189,21 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
/*
* Handle properly the boost frequencies, which should simply clean
- * the thermal pressure value.
+ * the HW pressure value.
*/
if (max_freq <= capped_freq)
capacity = max_capacity;
else
capacity = mult_frac(max_capacity, capped_freq, max_freq);
- th_pressure = max_capacity - capacity;
+ hw_pressure = max_capacity - capacity;
- trace_thermal_pressure_update(cpu, th_pressure);
+ trace_hw_pressure_update(cpu, hw_pressure);
for_each_cpu(cpu, cpus)
- WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+ WRITE_ONCE(per_cpu(hw_pressure, cpu), hw_pressure);
}
-EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
+EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5679f966f676..4a67e83300e1 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -208,7 +208,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -223,7 +223,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
return;
rettime = ktime_get();
- dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
+ dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
(unsigned long long)ktime_us_delta(rettime, calltime));
}
@@ -1927,7 +1927,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
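
%pS and %ps both resolve a pointer to a kernel symbol, but %pS appends offset/size information, which is useful for backtrace addresses that land mid-function. These callbacks are plain function pointers, so the offset is always zero and only adds noise; illustratively (output approximate):

	dev_info(dev, "calling %pS\n", cb);	/* "calling foo_suspend+0x0/0x20" */
	dev_info(dev, "calling %ps\n", cb);	/* "calling foo_suspend" */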
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a917219feea6..752b417e8129 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -451,16 +451,15 @@ static struct wakeup_source *device_wakeup_detach(struct device *dev)
* Detach the @dev's wakeup source object from it, unregister this wakeup source
* object and destroy it.
*/
-int device_wakeup_disable(struct device *dev)
+void device_wakeup_disable(struct device *dev)
{
struct wakeup_source *ws;
if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
+ return;
ws = device_wakeup_detach(dev);
wakeup_source_unregister(ws);
- return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
@@ -502,7 +501,11 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+ if (enable)
+ return device_wakeup_enable(dev);
+
+ device_wakeup_disable(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
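
With the disable path now void, only enabling wakeup can fail (it allocates and registers a wakeup source), which is why device_set_wakeup_enable() above still returns an int for the enable branch alone. A hypothetical caller update:

	/* before */
	ret = device_wakeup_disable(dev);
	if (ret)
		return ret;
	/* after */
	device_wakeup_disable(dev);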
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index bcdb25bec77c..83acccdc1008 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -326,20 +326,22 @@ struct regmap_ram_data {
* Create a test register map with data stored in RAM, not intended
* for practical use.
*/
-struct regmap *__regmap_init_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name);
-#define regmap_init_ram(config, data) \
- __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data)
+#define regmap_init_ram(dev, config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_ram, #dev, dev, config, data)
-struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_raw_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name);
-#define regmap_init_raw_ram(config, data) \
- __regmap_lockdep_wrapper(__regmap_init_raw_ram, #config, config, data)
+#define regmap_init_raw_ram(dev, config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_raw_ram, #dev, dev, config, data)
#endif
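
The RAM-backed test regmaps now take a struct device, which the KUnit suite gets from kunit_device_register() (hence the <kunit/device.h> include added below). A minimal sketch of a suite init along those lines; the dev_set_drvdata() link is what reg_5_false() below relies on, the rest is assumed boilerplate:

	static int regmap_test_init(struct kunit *test)
	{
		struct regmap_test_priv *priv;

		priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->dev = kunit_device_register(test, "regmap_test");
		if (IS_ERR(priv->dev))
			return PTR_ERR(priv->dev);

		dev_set_drvdata(priv->dev, test);	/* used by reg_5_false() */
		test->priv = priv;
		return 0;
	}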
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 55999a50ccc0..e42433404854 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -294,7 +294,7 @@ static int regcache_maple_exit(struct regmap *map)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, 0, UINT_MAX);
- unsigned int *entry;;
+ unsigned int *entry;
/* if we've already been called then just return */
if (!mt)
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
index 0328b0b34284..b5300b7c477e 100644
--- a/drivers/base/regmap/regmap-i3c.c
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -56,5 +56,5 @@ struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c);
MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
-MODULE_DESCRIPTION("Regmap I3C Module");
+MODULE_DESCRIPTION("regmap I3C Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index bb2ab6129f38..9c5314785fc2 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -4,11 +4,26 @@
//
// Copyright 2023 Arm Ltd
+#include <kunit/device.h>
+#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"
#define BLOCK_TEST_SIZE 12
+KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);
+
+struct regmap_test_priv {
+ struct device *dev;
+};
+
+struct regmap_test_param {
+ enum regcache_type cache;
+ enum regmap_endian val_endian;
+
+ unsigned int from_reg;
+};
+
static void get_changed_bytes(void *orig, void *new, size_t size)
{
char *o = orig;
@@ -27,57 +42,128 @@ static void get_changed_bytes(void *orig, void *new, size_t size)
}
static const struct regmap_config test_regmap_config = {
- .max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
.val_bits = sizeof(unsigned int) * 8,
};
-struct regcache_types {
- enum regcache_type type;
- const char *name;
-};
+static const char *regcache_type_name(enum regcache_type type)
+{
+ switch (type) {
+ case REGCACHE_NONE:
+ return "none";
+ case REGCACHE_FLAT:
+ return "flat";
+ case REGCACHE_RBTREE:
+ return "rbtree";
+ case REGCACHE_MAPLE:
+ return "maple";
+ default:
+ return NULL;
+ }
+}
+
+static const char *regmap_endian_name(enum regmap_endian endian)
+{
+ switch (endian) {
+ case REGMAP_ENDIAN_BIG:
+ return "big";
+ case REGMAP_ENDIAN_LITTLE:
+ return "little";
+ case REGMAP_ENDIAN_DEFAULT:
+ return "default";
+ case REGMAP_ENDIAN_NATIVE:
+ return "native";
+ default:
+ return NULL;
+ }
+}
-static void case_to_desc(const struct regcache_types *t, char *desc)
+static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
- strcpy(desc, t->name);
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
+ regcache_type_name(param->cache),
+ regmap_endian_name(param->val_endian),
+ param->from_reg);
}
-static const struct regcache_types regcache_types_list[] = {
- { REGCACHE_NONE, "none" },
- { REGCACHE_FLAT, "flat" },
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+static const struct regmap_test_param regcache_types_list[] = {
+ { .cache = REGCACHE_NONE },
+ { .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_MAPLE },
};
-KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
-static const struct regcache_types real_cache_types_list[] = {
- { REGCACHE_FLAT, "flat" },
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+static const struct regmap_test_param real_cache_types_only_list[] = {
+ { .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_MAPLE },
};
-KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);
+
+static const struct regmap_test_param real_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
+};
-static const struct regcache_types sparse_cache_types_list[] = {
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
+
+static const struct regmap_test_param sparse_cache_types_list[] = {
+ { .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};
-KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
-static struct regmap *gen_regmap(struct regmap_config *config,
+static struct regmap *gen_regmap(struct kunit *test,
+ struct regmap_config *config,
struct regmap_ram_data **data)
{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap_test_priv *priv = test->priv;
unsigned int *buf;
struct regmap *ret;
- size_t size = (config->max_register + 1) * sizeof(unsigned int);
+ size_t size;
int i;
struct reg_default *defaults;
+ config->cache_type = param->cache;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
+ if (config->max_register == 0) {
+ config->max_register = param->from_reg;
+ if (config->num_reg_defaults)
+ config->max_register += (config->num_reg_defaults - 1) *
+ config->reg_stride;
+ else
+ config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
+ }
+
+ size = (config->max_register + 1) * sizeof(unsigned int);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -98,37 +184,40 @@ static struct regmap *gen_regmap(struct regmap_config *config,
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
- defaults[i].reg = i * config->reg_stride;
- defaults[i].def = buf[i * config->reg_stride];
+ defaults[i].reg = param->from_reg + (i * config->reg_stride);
+ defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
}
}
- ret = regmap_init_ram(config, *data);
+ ret = regmap_init_ram(priv->dev, config, *data);
if (IS_ERR(ret)) {
kfree(buf);
kfree(*data);
+ } else {
+ kunit_add_action(test, regmap_exit_action, ret);
}
return ret;
}
-static bool reg_5_false(struct device *context, unsigned int reg)
+static bool reg_5_false(struct device *dev, unsigned int reg)
{
- return reg != 5;
+ struct kunit *test = dev_get_drvdata(dev);
+ const struct regmap_test_param *param = test->param_value;
+
+ return reg != (param->from_reg + 5);
}
static void basic_read_write(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -141,14 +230,11 @@ static void basic_read_write(struct kunit *test)
KUNIT_EXPECT_EQ(test, val, rval);
/* If using a cache the cache satisfied the read */
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}
static void bulk_write(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -156,9 +242,8 @@ static void bulk_write(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -178,14 +263,11 @@ static void bulk_write(struct kunit *test)
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void bulk_read(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -193,9 +275,8 @@ static void bulk_read(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -211,14 +292,140 @@ static void bulk_read(struct kunit *test)
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
- regmap_exit(map);
+static void read_bypassed(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval;
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Write some test values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
+
+ regcache_cache_only(map, true);
+
+ /*
+ * While in cache-only mode, regmap_read_bypassed() should return the
+ * register value and leave the map in cache-only mode.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ /* Put inverted bits in rval to prove we really read the value */
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+
+ /*
+ * Change the underlying register values to prove it is returning
+ * real values, not cached values.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] = ~val[i];
+ data->vals[param->from_reg + i] = val[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_NE(test, val[i], rval);
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+}
+
+static void read_bypassed_volatile(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval;
+ int i;
+
+ config = test_regmap_config;
+ /* All registers except #5 volatile */
+ config.volatile_reg = reg_5_false;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Write some test values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
+
+ regcache_cache_only(map, true);
+
+ /*
+ * While in cache-only mode, regmap_read_bypassed() should return the
+ * register value and leave the map in cache-only mode.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ /* Register #5 is non-volatile so should read from cache */
+ KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
+ regmap_read(map, param->from_reg + i, &rval));
+
+ /* Put inverted bits in rval to prove we really read the value */
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+
+ /*
+ * Change the underlying register values to prove it is returning
+ * real values, not cached values.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] = ~val[i];
+ data->vals[param->from_reg + i] = val[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ if (i == 5)
+ continue;
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
}
static void write_readonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -226,11 +433,10 @@ static void write_readonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
config.writeable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -247,13 +453,10 @@ static void write_readonly(struct kunit *test)
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
-
- regmap_exit(map);
}
static void read_writeonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -261,10 +464,9 @@ static void read_writeonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.readable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -277,7 +479,7 @@ static void read_writeonly(struct kunit *test)
* fail if we aren't using the flat cache.
*/
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- if (t->type != REGCACHE_FLAT) {
+ if (config.cache_type != REGCACHE_FLAT) {
KUNIT_EXPECT_EQ(test, i != 5,
regmap_read(map, i, &val) == 0);
} else {
@@ -287,13 +489,10 @@ static void read_writeonly(struct kunit *test)
/* Did we trigger a hardware access? */
KUNIT_EXPECT_FALSE(test, data->read[5]);
-
- regmap_exit(map);
}
static void reg_defaults(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -301,10 +500,9 @@ static void reg_defaults(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -316,12 +514,11 @@ static void reg_defaults(struct kunit *test)
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void reg_defaults_read_dev(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -329,17 +526,16 @@ static void reg_defaults_read_dev(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* We should have read the cache defaults back from the map */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
data->read[i] = false;
}
@@ -350,12 +546,11 @@ static void reg_defaults_read_dev(struct kunit *test)
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void register_patch(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -365,10 +560,9 @@ static void register_patch(struct kunit *test)
/* We need defaults so readback works */
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -401,13 +595,10 @@ static void register_patch(struct kunit *test)
break;
}
}
-
- regmap_exit(map);
}
static void stride(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -415,11 +606,10 @@ static void stride(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.reg_stride = 2;
config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -437,15 +627,13 @@ static void stride(struct kunit *test)
} else {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, data->vals[i], rval);
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
data->read[i]);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
KUNIT_EXPECT_TRUE(test, data->written[i]);
}
}
-
- regmap_exit(map);
}
static struct regmap_range_cfg test_range = {
@@ -481,7 +669,6 @@ static bool test_range_all_volatile(struct device *dev, unsigned int reg)
static void basic_ranges(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -489,13 +676,12 @@ static void basic_ranges(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.volatile_reg = test_range_all_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -546,14 +732,11 @@ static void basic_ranges(struct kunit *test)
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
-
- regmap_exit(map);
}
/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -562,10 +745,9 @@ static void stress_insert(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.max_register = 300;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -599,24 +781,21 @@ static void stress_insert(struct kunit *test)
for (i = 0; i < config.max_register; i ++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, rval, vals[i]);
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
-
- regmap_exit(map);
}
static void cache_bypass(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -624,28 +803,26 @@ static void cache_bypass(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Ensure the cache has a value in it */
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));
/* Bypass then write a different value */
regcache_cache_bypass(map, true);
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));
/* Read the bypassed value */
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val + 1, rval);
- KUNIT_EXPECT_EQ(test, data->vals[0], rval);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);
/* Disable bypass, the cache should still return the original value */
regcache_cache_bypass(map, false);
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
-
- regmap_exit(map);
}
-static void cache_sync(struct kunit *test)
+static void cache_sync_marked_dirty(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -653,9 +830,8 @@ static void cache_sync(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -663,10 +839,10 @@ static void cache_sync(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Put some data into the cache */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
/* Trash the data on the device itself then resync */
regcache_mark_dirty(map);
@@ -674,16 +850,63 @@ static void cache_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just write the correct data out? */
- KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, true, data->written[i]);
+ KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
+}
+
+static void cache_sync_after_cache_only(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE];
+ unsigned int val_mask;
+ int i;
- regmap_exit(map);
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ val_mask = GENMASK(config.val_bits - 1, 0);
+ get_random_bytes(&val, sizeof(val));
+
+ /* Put some data into the cache */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ /* Set cache-only and change the values */
+ regcache_cache_only(map, true);
+ for (i = 0; i < ARRAY_SIZE(val); ++i)
+ val[i] = ~val[i] & val_mask;
+
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
+
+ KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));
+
+ /* Exit cache-only and sync the cache without marking hardware registers dirty */
+ regcache_cache_only(map, false);
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Did we just write the correct data out? */
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}
-static void cache_sync_defaults(struct kunit *test)
+static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -691,10 +914,9 @@ static void cache_sync_defaults(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -702,24 +924,85 @@ static void cache_sync_defaults(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Change the value of one register */
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));
/* Resync */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just sync the one register we touched? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
+ KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);
+
+ /* Rewrite registers back to their defaults */
+ for (i = 0; i < config.num_reg_defaults; ++i)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
+ config.reg_defaults[i].def));
- regmap_exit(map);
+ /*
+ * Resync after regcache_mark_dirty() should not write out registers
+ * that are at default value
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+ regcache_mark_dirty(map);
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
+}
+
+static void cache_sync_default_after_cache_only(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int orig_val;
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));
+
+ /* Enter cache-only and change the value of one register */
+ regcache_cache_only(map, true);
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));
+
+ /* Exit cache-only and resync, should write out the changed register */
+ regcache_cache_only(map, false);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Was the register written out? */
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);
+
+ /* Enter cache-only and write register back to its default value */
+ regcache_cache_only(map, true);
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));
+
+ /* Resync should write out the new value */
+ regcache_cache_only(map, false);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}
static void cache_sync_readonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -727,40 +1010,37 @@ static void cache_sync_readonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.writeable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Read all registers to fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Change the value of all registers, readonly should fail */
get_random_bytes(&val, sizeof(val));
regcache_cache_only(map, true);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
+ KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
regcache_cache_only(map, false);
/* Resync */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}
static void cache_sync_patch(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -770,23 +1050,22 @@ static void cache_sync_patch(struct kunit *test)
/* We need defaults so readback works */
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Stash the original values */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
/* Patch a couple of values */
- patch[0].reg = 2;
+ patch[0].reg = param->from_reg + 2;
patch[0].def = rval[2] + 1;
patch[0].delay_us = 0;
- patch[1].reg = 5;
+ patch[1].reg = param->from_reg + 5;
patch[1].def = rval[5] + 1;
patch[1].delay_us = 0;
KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
@@ -795,33 +1074,31 @@ static void cache_sync_patch(struct kunit *test)
/* Sync the cache */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The patch should be on the device but not in the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
KUNIT_EXPECT_EQ(test, val, rval[i]);
switch (i) {
case 2:
case 5:
- KUNIT_EXPECT_EQ(test, true, data->written[i]);
- KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
+ KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
break;
default:
- KUNIT_EXPECT_EQ(test, false, data->written[i]);
- KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
+ KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
break;
}
}
-
- regmap_exit(map);
}
static void cache_drop(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -829,41 +1106,267 @@ static void cache_drop(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Ensure the data is read from the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->read[i] = false;
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_FALSE(test, data->read[i]);
- data->read[i] = false;
+ KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
+ data->read[param->from_reg + i] = false;
}
- KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
/* Drop some registers */
- KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
+ param->from_reg + 5));
/* Reread and check only the dropped registers hit the device. */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
- KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+ KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+}
+
+static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[4][BLOCK_TEST_SIZE];
+ unsigned int reg;
+ const int num_ranges = ARRAY_SIZE(val) * 2;
+ int rangeidx, i;
- regmap_exit(map);
+ static_assert(ARRAY_SIZE(val) == 4);
+
+ config = test_regmap_config;
+ config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = 0; i < config.max_register + 1; i++)
+ data->written[i] = false;
+
+ /* Create non-contiguous cache blocks by writing every other range */
+ get_random_bytes(&val, sizeof(val));
+ for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
+ &val[rangeidx / 2],
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
+ &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
+ }
+
+ /* Check that odd ranges weren't written */
+ for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+ }
+
+ /* Drop range 2 */
+ reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));
+
+ /* Drop part of range 4 */
+ reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));
+
+ /* Mark dirty and reset mock registers to 0 */
+ regcache_mark_dirty(map);
+ for (i = 0; i < config.max_register + 1; i++) {
+ data->vals[i] = 0;
+ data->written[i] = false;
+ }
+
+ /* The registers that were dropped from range 4 should now remain at 0 */
+ val[4 / 2][3] = 0;
+ val[4 / 2][4] = 0;
+ val[4 / 2][5] = 0;
+
+ /* Sync and check that the expected register ranges were written */
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Check that odd ranges weren't written */
+ for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+ }
+
+ /* Check that even ranges (except 2 and 4) were written */
+ for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
+ if ((rangeidx == 2) || (rangeidx == 4))
+ continue;
+
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_TRUE(test, data->written[reg + i]);
+
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
+ &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
+ }
+
+ /* Check that range 2 wasn't written */
+ reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+
+ /* Check that range 4 was partially written */
+ reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);
+
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));
+
+ /* Nothing before param->from_reg should have been written */
+ for (i = 0; i < param->from_reg; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache from defaults */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /* Mark dirty and cache sync should not write anything. */
+ regcache_mark_dirty(map);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /*
+ * Sync cache without marking it dirty. All registers were dropped
+ * so the cache should not have any entries to write out.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache from defaults */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /*
+ * Sync cache without marking it dirty. All registers were dropped
+ * so the cache should not have any entries to write out.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
}
static void cache_present(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -871,39 +1374,35 @@ static void cache_present(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->read[i] = false;
+ data->read[param->from_reg + i] = false;
/* No defaults so no registers cached. */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
/* We didn't trigger any reads */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_FALSE(test, data->read[i]);
+ KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
/* Fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Now everything should be cached */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));
-
- regmap_exit(map);
+ KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}
/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -911,13 +1410,12 @@ static void cache_range_window_reg(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.volatile_reg = test_range_window_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -953,41 +1451,29 @@ static void cache_range_window_reg(struct kunit *test)
KUNIT_ASSERT_EQ(test, val, 2);
}
-struct raw_test_types {
- const char *name;
-
- enum regcache_type cache_type;
- enum regmap_endian val_endian;
-};
-
-static void raw_to_desc(const struct raw_test_types *t, char *desc)
-{
- strcpy(desc, t->name);
-}
-
-static const struct raw_test_types raw_types_list[] = {
- { "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
- { "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
- { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
- { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
- { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
- { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
- { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
- { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
+static const struct regmap_test_param raw_types_list[] = {
+ { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};
-KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
+KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);
-static const struct raw_test_types raw_cache_types_list[] = {
- { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
- { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
- { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
- { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
- { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
- { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
+static const struct regmap_test_param raw_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};
-KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
+KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);
static const struct regmap_config raw_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
@@ -997,18 +1483,20 @@ static const struct regmap_config raw_regmap_config = {
.val_bits = 16,
};
-static struct regmap *gen_raw_regmap(struct regmap_config *config,
- struct raw_test_types *test_type,
+static struct regmap *gen_raw_regmap(struct kunit *test,
+ struct regmap_config *config,
struct regmap_ram_data **data)
{
+ struct regmap_test_priv *priv = test->priv;
+ const struct regmap_test_param *param = test->param_value;
u16 *buf;
struct regmap *ret;
size_t size = (config->max_register + 1) * config->reg_bits / 8;
int i;
struct reg_default *defaults;
- config->cache_type = test_type->cache_type;
- config->val_format_endian = test_type->val_endian;
+ config->cache_type = param->cache;
+ config->val_format_endian = param->val_endian;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
@@ -1033,7 +1521,7 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
for (i = 0; i < config->num_reg_defaults; i++) {
defaults[i].reg = i;
- switch (test_type->val_endian) {
+ switch (param->val_endian) {
case REGMAP_ENDIAN_LITTLE:
defaults[i].def = le16_to_cpu(buf[i]);
break;
@@ -1052,10 +1540,12 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
if (config->cache_type == REGCACHE_NONE)
config->num_reg_defaults = 0;
- ret = regmap_init_raw_ram(config, *data);
+ ret = regmap_init_raw_ram(priv->dev, config, *data);
if (IS_ERR(ret)) {
kfree(buf);
kfree(*data);
+ } else {
+ kunit_add_action(test, regmap_exit_action, ret);
}
return ret;
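
[Editor's note: the kunit_add_action() call above replaces the explicit regmap_exit() each test used to perform. A minimal sketch of the deferred-cleanup pattern — the callback body is an assumption here, though the real regmap_exit_action defined earlier in this file likely looks the same:]

	static void regmap_exit_action(void *data)
	{
		struct regmap *map = data;

		regmap_exit(map);	/* runs automatically when the test finishes */
	}

	/* ...after a successful init inside a test or helper: */
	kunit_add_action(test, regmap_exit_action, map);
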
@@ -1063,7 +1553,6 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
static void raw_read_defaults_single(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1072,7 +1561,7 @@ static void raw_read_defaults_single(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1082,13 +1571,10 @@ static void raw_read_defaults_single(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
}
-
- regmap_exit(map);
}
static void raw_read_defaults(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1099,35 +1585,31 @@ static void raw_read_defaults(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
val_len = sizeof(*rval) * (config.max_register + 1);
- rval = kmalloc(val_len, GFP_KERNEL);
+ rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, rval != NULL);
if (!rval)
return;
-
+
/* Check that we can read the defaults via the API */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
for (i = 0; i < config.max_register + 1; i++) {
def = config.reg_defaults[i].def;
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
- KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
+ KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
} else {
- KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
+ KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
}
}
-
- kfree(rval);
- regmap_exit(map);
}
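
[Editor's note: the __force casts added above silence sparse: rval[] is plain u16 holding raw bus bytes, while be16_to_cpu()/le16_to_cpu() expect the endian-annotated __be16/__le16 types. A standalone sketch of the idiom; big_endian is a made-up flag:]

	u16 raw = buf[i];	/* raw wire value; endianness unknown to the type system */
	unsigned int host;

	if (big_endian)
		host = be16_to_cpu((__force __be16)raw);
	else
		host = le16_to_cpu((__force __le16)raw);
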
static void raw_write_read_single(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1136,7 +1618,7 @@ static void raw_write_read_single(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1147,13 +1629,10 @@ static void raw_write_read_single(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
-
- regmap_exit(map);
}
static void raw_write(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1164,7 +1643,7 @@ static void raw_write(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1185,10 +1664,10 @@ static void raw_write(struct kunit *test)
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i % 2]));
+ be16_to_cpu((__force __be16)val[i % 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i % 2]));
+ le16_to_cpu((__force __le16)val[i % 2]));
}
break;
default:
@@ -1199,8 +1678,6 @@ static void raw_write(struct kunit *test)
/* The values should appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
-
- regmap_exit(map);
}
static bool reg_zero(struct device *dev, unsigned int reg)
@@ -1215,7 +1692,6 @@ static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
static void raw_noinc_write(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1228,7 +1704,7 @@ static void raw_noinc_write(struct kunit *test)
config.writeable_noinc_reg = reg_zero;
config.readable_noinc_reg = reg_zero;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1259,13 +1735,10 @@ static void raw_noinc_write(struct kunit *test)
/* Make sure we didn't touch the register after the noinc register */
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
KUNIT_ASSERT_EQ(test, val_test, val);
-
- regmap_exit(map);
}
static void raw_sync(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1276,7 +1749,7 @@ static void raw_sync(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1300,10 +1773,10 @@ static void raw_sync(struct kunit *test)
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i - 2]));
+ be16_to_cpu((__force __be16)val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i - 2]));
+ le16_to_cpu((__force __le16)val[i - 2]));
}
break;
case 4:
@@ -1323,7 +1796,7 @@ static void raw_sync(struct kunit *test)
val[2] = cpu_to_be16(val[2]);
else
val[2] = cpu_to_le16(val[2]);
-
+
/* The values should not appear in the "hardware" */
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
@@ -1337,13 +1810,10 @@ static void raw_sync(struct kunit *test)
/* The values should now appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
-
- regmap_exit(map);
}
static void raw_ranges(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1356,7 +1826,7 @@ static void raw_ranges(struct kunit *test)
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1402,12 +1872,12 @@ static void raw_ranges(struct kunit *test)
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
-
- regmap_exit(map);
}
static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
@@ -1419,13 +1889,19 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
@@ -1437,8 +1913,40 @@ static struct kunit_case regmap_test_cases[] = {
{}
};
+static int regmap_test_init(struct kunit *test)
+{
+ struct regmap_test_priv *priv;
+ struct device *dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ test->priv = priv;
+
+ dev = kunit_device_register(test, "regmap_test");
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ priv->dev = get_device(dev);
+ dev_set_drvdata(dev, test);
+
+ return 0;
+}
+
+static void regmap_test_exit(struct kunit *test)
+{
+ struct regmap_test_priv *priv = test->priv;
+
+ /* Destroy the dummy struct device */
+ if (priv && priv->dev)
+ put_device(priv->dev);
+}
+
static struct kunit_suite regmap_test_suite = {
.name = "regmap",
+ .init = regmap_test_init,
+ .exit = regmap_test_exit,
.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
index 6aa6a2409478..9573bf3b52f4 100644
--- a/drivers/base/regmap/regmap-mdio.c
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -117,5 +117,5 @@ struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio);
MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
-MODULE_DESCRIPTION("Regmap MDIO Module");
+MODULE_DESCRIPTION("regmap MDIO Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
index 192d6b131dff..5b4cbf982a11 100644
--- a/drivers/base/regmap/regmap-ram.c
+++ b/drivers/base/regmap/regmap-ram.c
@@ -53,7 +53,8 @@ static const struct regmap_bus regmap_ram = {
.free_context = regmap_ram_free_context,
};
-struct regmap *__regmap_init_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
@@ -75,7 +76,7 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config,
if (!data->written)
return ERR_PTR(-ENOMEM);
- map = __regmap_init(NULL, &regmap_ram, data, config,
+ map = __regmap_init(dev, &regmap_ram, data, config,
lock_key, lock_name);
return map;
diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c
index 93ae07b503fd..69eabfb89eda 100644
--- a/drivers/base/regmap/regmap-raw-ram.c
+++ b/drivers/base/regmap/regmap-raw-ram.c
@@ -107,7 +107,8 @@ static const struct regmap_bus regmap_raw_ram = {
.free_context = regmap_raw_ram_free_context,
};
-struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_raw_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
@@ -134,7 +135,7 @@ struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
data->reg_endian = config->reg_format_endian;
- map = __regmap_init(NULL, &regmap_raw_ram, data, config,
+ map = __regmap_init(dev, &regmap_raw_ram, data, config,
lock_key, lock_name);
return map;
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index 388c3a087bd9..c99eada83780 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -97,5 +97,5 @@ struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
-MODULE_DESCRIPTION("Regmap SoundWire MBQ Module");
+MODULE_DESCRIPTION("regmap SoundWire MBQ Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 159c0b740b00..ea631ac7c7ec 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -98,5 +98,5 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
-MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_DESCRIPTION("regmap SoundWire Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 37ab23a9d034..094cf2a2ca3c 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -165,4 +165,5 @@ struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
+MODULE_DESCRIPTION("regmap SPI Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 5cb425f6f02d..0a34dd3c4f38 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -2839,6 +2839,43 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
EXPORT_SYMBOL_GPL(regmap_read);
/**
+ * regmap_read_bypassed() - Read a value from a single register direct
+ * from the device, bypassing the cache
+ *
+ * @map: Register map to read from
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
+{
+ int ret;
+ bool bypass, cache_only;
+
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ bypass = map->cache_bypass;
+ cache_only = map->cache_only;
+ map->cache_bypass = true;
+ map->cache_only = false;
+
+ ret = _regmap_read(map, reg, val);
+
+ map->cache_bypass = bypass;
+ map->cache_only = cache_only;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_read_bypassed);
+
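+[Editor's note: a minimal usage sketch for the new helper — hypothetical driver code; FOO_REG_STATUS and FOO_STATUS_READY are made-up names. Because the helper forces cache_bypass and clears cache_only around the read, it works even while the map is in cache-only mode, e.g. during early resume:]
+
+	static int foo_wait_ready(struct regmap *map)
+	{
+		unsigned int status;
+		int ret;
+
+		/* Read straight from the hardware, ignoring the regcache */
+		ret = regmap_read_bypassed(map, FOO_REG_STATUS, &status);
+		if (ret < 0)
+			return ret;
+
+		return (status & FOO_STATUS_READY) ? 0 : -EBUSY;
+	}
+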
+/**
* regmap_raw_read() - Read raw data from the device
*
* @map: Register map to read from
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index e322cef6596b..b900fe9e0030 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -29,10 +29,7 @@
/*
* Each block ramdisk device has a xarray brd_pages of pages that stores
- * the pages containing the block device's contents. A brd page's ->index is
- * its offset in PAGE_SIZE units. This is similar to, but in no way connected
- * with, the kernel's pagecache or buffer cache (which sit above our block
- * device).
+ * the pages containing the block device's contents.
*/
struct brd_device {
int brd_number;
@@ -51,15 +48,7 @@ struct brd_device {
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
- pgoff_t idx;
- struct page *page;
-
- idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
- page = xa_load(&brd->brd_pages, idx);
-
- BUG_ON(page && page->index != idx);
-
- return page;
+ return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
}
/*
@@ -67,8 +56,8 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
*/
static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
{
- pgoff_t idx;
- struct page *page, *cur;
+ pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
+ struct page *page;
int ret = 0;
page = brd_lookup_page(brd, sector);
@@ -80,23 +69,16 @@ static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
return -ENOMEM;
xa_lock(&brd->brd_pages);
-
- idx = sector >> PAGE_SECTORS_SHIFT;
- page->index = idx;
-
- cur = __xa_cmpxchg(&brd->brd_pages, idx, NULL, page, gfp);
-
- if (unlikely(cur)) {
- __free_page(page);
- ret = xa_err(cur);
- if (!ret && (cur->index != idx))
- ret = -EIO;
- } else {
+ ret = __xa_insert(&brd->brd_pages, idx, page, gfp);
+ if (!ret)
brd->brd_nr_pages++;
- }
-
xa_unlock(&brd->brd_pages);
+ if (ret < 0) {
+ __free_page(page);
+ if (ret == -EBUSY)
+ ret = 0;
+ }
return ret;
}
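
[Editor's note: the rework replaces the __xa_cmpxchg() dance with __xa_insert(), which fails with -EBUSY when another writer populated the index first; the loser frees its page and treats the race as success. The pattern in isolation, with locals standing in for the brd fields:]

	xa_lock(&pages);
	err = __xa_insert(&pages, idx, page, gfp);	/* -EBUSY if slot taken */
	if (!err)
		nr_pages++;
	xa_unlock(&pages);

	if (err < 0) {
		__free_page(page);	/* another thread won the race */
		if (err == -EBUSY)
			err = 0;	/* an existing page is fine */
	}
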
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index ed33cf7192d2..4005a8b685e8 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -225,6 +225,10 @@ static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+static bool g_fua = true;
+module_param_named(fua, g_fua, bool, 0444);
+MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");
+
static unsigned int g_mbps;
module_param_named(mbps, g_mbps, uint, 0444);
MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
@@ -253,6 +257,11 @@ static unsigned int g_zone_max_active;
module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
+static int g_zone_append_max_sectors = INT_MAX;
+module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444);
+MODULE_PARM_DESC(zone_append_max_sectors,
+ "Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -436,10 +445,12 @@ NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
+NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
+NULLB_DEVICE_ATTR(fua, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -580,12 +591,14 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_nr_conv,
&nullb_device_attr_zone_max_open,
&nullb_device_attr_zone_max_active,
+ &nullb_device_attr_zone_append_max_sectors,
&nullb_device_attr_zone_readonly,
&nullb_device_attr_zone_offline,
&nullb_device_attr_virt_boundary,
&nullb_device_attr_no_sched,
&nullb_device_attr_shared_tags,
&nullb_device_attr_shared_tag_bitmap,
+ &nullb_device_attr_fua,
NULL,
};
@@ -664,14 +677,14 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "badblocks,blocking,blocksize,cache_size,"
+ "badblocks,blocking,blocksize,cache_size,fua,"
"completion_nsec,discard,home_node,hw_queue_depth,"
"irqmode,max_sectors,mbps,memory_backed,no_sched,"
"poll_queues,power,queue_mode,shared_tag_bitmap,"
"shared_tags,size,submit_queues,use_per_node_hctx,"
"virt_boundary,zoned,zone_capacity,zone_max_active,"
"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size\n");
+ "zone_size,zone_append_max_sectors\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -751,10 +764,13 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_nr_conv = g_zone_nr_conv;
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
+ dev->zone_append_max_sectors = g_zone_append_max_sectors;
dev->virt_boundary = g_virt_boundary;
dev->no_sched = g_no_sched;
dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
+ dev->fua = g_fua;
+
return dev;
}
@@ -1151,7 +1167,7 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
return BLK_STS_OK;
}
-static int null_handle_flush(struct nullb *nullb)
+static blk_status_t null_handle_flush(struct nullb *nullb)
{
int err;
@@ -1168,7 +1184,7 @@ static int null_handle_flush(struct nullb *nullb)
WARN_ON(!radix_tree_empty(&nullb->dev->cache));
spin_unlock_irq(&nullb->lock);
- return err;
+ return errno_to_blk_status(err);
}
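
[Editor's note: returning blk_status_t from the flush and rq handlers keeps the two error domains apart — negative errnos are converted exactly once, at the boundary. A sketch of the convention; do_io is a placeholder:]

	static blk_status_t handle_io(struct nullb *nullb)
	{
		int err = do_io(nullb);		/* returns 0 or -errno */

		/* 0 -> BLK_STS_OK, -ENOMEM -> BLK_STS_RESOURCE, ... */
		return errno_to_blk_status(err);
	}
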
static int null_transfer(struct nullb *nullb, struct page *page,
@@ -1206,7 +1222,7 @@ static int null_handle_rq(struct nullb_cmd *cmd)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
- int err;
+ int err = 0;
unsigned int len;
sector_t sector = blk_rq_pos(rq);
struct req_iterator iter;
@@ -1218,15 +1234,13 @@ static int null_handle_rq(struct nullb_cmd *cmd)
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
rq->cmd_flags & REQ_FUA);
- if (err) {
- spin_unlock_irq(&nullb->lock);
- return err;
- }
+ if (err)
+ break;
sector += len >> SECTOR_SHIFT;
}
spin_unlock_irq(&nullb->lock);
- return 0;
+ return errno_to_blk_status(err);
}
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
@@ -1273,8 +1287,8 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);
- return errno_to_blk_status(null_handle_rq(cmd));
+ return null_handle_rq(cmd);
}
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
@@ -1343,7 +1357,7 @@ static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
blk_status_t sts;
if (op == REQ_OP_FLUSH) {
- cmd->error = errno_to_blk_status(null_handle_flush(nullb));
+ cmd->error = null_handle_flush(nullb);
goto out;
}
@@ -1912,7 +1926,7 @@ static int null_add_dev(struct nullb_device *dev)
if (dev->cache_size > 0) {
set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
- blk_queue_write_cache(nullb->q, true, true);
+ blk_queue_write_cache(nullb->q, true, dev->fua);
}
nullb->q->queuedata = nullb;
@@ -2113,10 +2127,13 @@ static void __exit null_exit(void)
if (tag_set.ops)
blk_mq_free_tag_set(&tag_set);
+
+ mutex_destroy(&lock);
}
module_init(null_init);
module_exit(null_exit);
MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
+MODULE_DESCRIPTION("multi queue aware block test driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 477b97746823..3234e6c85eed 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -82,6 +82,7 @@ struct nullb_device {
unsigned int zone_nr_conv; /* number of conventional zones */
unsigned int zone_max_open; /* max number of open zones */
unsigned int zone_max_active; /* max number of active zones */
+ unsigned int zone_append_max_sectors; /* Max sectors per zone append command */
unsigned int submit_queues; /* number of submission queues */
unsigned int prev_submit_queues; /* number of submission queues before change */
unsigned int poll_queues; /* number of IOPOLL submission queues */
@@ -104,6 +105,7 @@ struct nullb_device {
bool no_sched; /* no IO scheduler for the device */
bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
+ bool fua; /* Support FUA */
};
struct nullb {
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 1689e2584104..5b5a63adacc1 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -9,6 +9,8 @@
#undef pr_fmt
#define pr_fmt(fmt) "null_blk: " fmt
+#define NULL_ZONE_INVALID_WP ((sector_t)-1)
+
static inline sector_t mb_to_sects(unsigned long mb)
{
return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
@@ -19,18 +21,6 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
return sect >> ilog2(dev->zone_size_sects);
}
-static inline void null_lock_zone_res(struct nullb_device *dev)
-{
- if (dev->need_zone_res_mgmt)
- spin_lock_irq(&dev->zone_res_lock);
-}
-
-static inline void null_unlock_zone_res(struct nullb_device *dev)
-{
- if (dev->need_zone_res_mgmt)
- spin_unlock_irq(&dev->zone_res_lock);
-}
-
static inline void null_init_zone_lock(struct nullb_device *dev,
struct nullb_zone *zone)
{
@@ -103,6 +93,11 @@ int null_init_zoned_dev(struct nullb_device *dev,
dev->zone_nr_conv);
}
+ dev->zone_append_max_sectors =
+ min(ALIGN_DOWN(dev->zone_append_max_sectors,
+ dev->blocksize >> SECTOR_SHIFT),
+ zone_capacity_sects);
+
/* Max active zones has to be < nbr of seq zones in order to be enforceable */
if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_active = 0;
@@ -154,7 +149,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
lim->zoned = true;
lim->chunk_sectors = dev->zone_size_sects;
- lim->max_zone_append_sectors = dev->zone_size_sects;
+ lim->max_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
lim->max_active_zones = dev->zone_max_active;
return 0;
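
[Editor's note: a worked example of the clamp above, with assumed values — a 4096-byte block is 8 sectors, so a requested limit of 123 sectors rounds down to 120 and is then capped at the zone capacity; a requested 0 stays 0, selecting zone-append emulation per the module parameter:]

	sector_t secs_per_blk = 4096 >> SECTOR_SHIFT;		/* 8 */
	sector_t limit = ALIGN_DOWN(123, secs_per_blk);		/* 120 */

	limit = min(limit, (sector_t)256);	/* zone capacity, example value */
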
@@ -163,11 +158,16 @@ int null_init_zoned_dev(struct nullb_device *dev,
int null_register_zoned_dev(struct nullb *nullb)
{
struct request_queue *q = nullb->q;
+ struct gendisk *disk = nullb->disk;
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
- nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
- return blk_revalidate_disk_zones(nullb->disk, NULL);
+ disk->nr_zones = bdev_nr_zones(disk->part0);
+
+ pr_info("%s: using %s zone append\n",
+ disk->disk_name,
+ queue_emulates_zone_append(q) ? "emulated" : "native");
+
+ return blk_revalidate_disk_zones(disk);
}
void null_free_zoned_dev(struct nullb_device *dev)
@@ -241,35 +241,6 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
return (zone->wp - sector) << SECTOR_SHIFT;
}
-static blk_status_t __null_close_zone(struct nullb_device *dev,
- struct nullb_zone *zone)
-{
- switch (zone->cond) {
- case BLK_ZONE_COND_CLOSED:
- /* close operation on closed is not an error */
- return BLK_STS_OK;
- case BLK_ZONE_COND_IMP_OPEN:
- dev->nr_zones_imp_open--;
- break;
- case BLK_ZONE_COND_EXP_OPEN:
- dev->nr_zones_exp_open--;
- break;
- case BLK_ZONE_COND_EMPTY:
- case BLK_ZONE_COND_FULL:
- default:
- return BLK_STS_IOERR;
- }
-
- if (zone->wp == zone->start) {
- zone->cond = BLK_ZONE_COND_EMPTY;
- } else {
- zone->cond = BLK_ZONE_COND_CLOSED;
- dev->nr_zones_closed++;
- }
-
- return BLK_STS_OK;
-}
-
static void null_close_imp_open_zone(struct nullb_device *dev)
{
struct nullb_zone *zone;
@@ -286,7 +257,13 @@ static void null_close_imp_open_zone(struct nullb_device *dev)
zno = dev->zone_nr_conv;
if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
- __null_close_zone(dev, zone);
+ dev->nr_zones_imp_open--;
+ if (zone->wp == zone->start) {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ } else {
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ dev->nr_zones_closed++;
+ }
dev->imp_close_zone_no = zno;
return;
}
@@ -374,73 +351,73 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
null_lock_zone(dev, zone);
- if (zone->cond == BLK_ZONE_COND_FULL ||
- zone->cond == BLK_ZONE_COND_READONLY ||
- zone->cond == BLK_ZONE_COND_OFFLINE) {
- /* Cannot write to the zone */
- ret = BLK_STS_IOERR;
- goto unlock;
- }
-
/*
- * Regular writes must be at the write pointer position.
- * Zone append writes are automatically issued at the write
- * pointer and the position returned using the request or BIO
- * sector.
+ * Regular writes must be at the write pointer position. Zone append
+ * writes are automatically issued at the write pointer and the position
+ * returned using the request sector. Note that we do not check the zone
+ * condition because for FULL, READONLY and OFFLINE zones, the sector
+ * check against the zone write pointer will always result in failing
+ * the command.
*/
if (append) {
+ if (WARN_ON_ONCE(!dev->zone_append_max_sectors) ||
+ zone->wp == NULL_ZONE_INVALID_WP) {
+ ret = BLK_STS_IOERR;
+ goto unlock_zone;
+ }
sector = zone->wp;
blk_mq_rq_from_pdu(cmd)->__sector = sector;
- } else if (sector != zone->wp) {
- ret = BLK_STS_IOERR;
- goto unlock;
}
- if (zone->wp + nr_sectors > zone->start + zone->capacity) {
+ if (sector != zone->wp ||
+ zone->wp + nr_sectors > zone->start + zone->capacity) {
ret = BLK_STS_IOERR;
- goto unlock;
+ goto unlock_zone;
}
if (zone->cond == BLK_ZONE_COND_CLOSED ||
zone->cond == BLK_ZONE_COND_EMPTY) {
- null_lock_zone_res(dev);
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK) {
- null_unlock_zone_res(dev);
- goto unlock;
- }
- if (zone->cond == BLK_ZONE_COND_CLOSED) {
- dev->nr_zones_closed--;
- dev->nr_zones_imp_open++;
- } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
- dev->nr_zones_imp_open++;
- }
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ goto unlock_zone;
+ }
+ if (zone->cond == BLK_ZONE_COND_CLOSED) {
+ dev->nr_zones_closed--;
+ dev->nr_zones_imp_open++;
+ } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
+ dev->nr_zones_imp_open++;
+ }
- if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
- zone->cond = BLK_ZONE_COND_IMP_OPEN;
+ spin_unlock(&dev->zone_res_lock);
+ }
- null_unlock_zone_res(dev);
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
}
ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
if (ret != BLK_STS_OK)
- goto unlock;
+ goto unlock_zone;
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
- null_lock_zone_res(dev);
- if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
- dev->nr_zones_exp_open--;
- else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
- dev->nr_zones_imp_open--;
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
+ if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
+ dev->nr_zones_exp_open--;
+ else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
+ dev->nr_zones_imp_open--;
+ spin_unlock(&dev->zone_res_lock);
+ }
zone->cond = BLK_ZONE_COND_FULL;
- null_unlock_zone_res(dev);
}
ret = BLK_STS_OK;
-unlock:
+unlock_zone:
null_unlock_zone(dev, zone);
return ret;
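
[Editor's note: the single write-pointer comparison subsumes the old condition checks — a FULL zone's wp sits at or beyond start + capacity, and READONLY/OFFLINE zones carry NULL_ZONE_INVALID_WP, so any regular write to such zones already fails the sector test. Restated in isolation:]

	/* sector != zone->wp rejects:
	 *  - writes to FULL zones (wp at/past start + capacity, never a valid LBA)
	 *  - writes to READONLY/OFFLINE zones (wp == NULL_ZONE_INVALID_WP)
	 *  - any regular write not at the write pointer
	 */
	if (sector != zone->wp ||
	    zone->wp + nr_sectors > zone->start + zone->capacity)
		return BLK_STS_IOERR;
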
@@ -454,54 +431,100 @@ static blk_status_t null_open_zone(struct nullb_device *dev,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
- null_lock_zone_res(dev);
-
switch (zone->cond) {
case BLK_ZONE_COND_EXP_OPEN:
- /* open operation on exp open is not an error */
- goto unlock;
+ /* Open operation on exp open is not an error */
+ return BLK_STS_OK;
case BLK_ZONE_COND_EMPTY:
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK)
- goto unlock;
- break;
case BLK_ZONE_COND_IMP_OPEN:
- dev->nr_zones_imp_open--;
- break;
case BLK_ZONE_COND_CLOSED:
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK)
- goto unlock;
- dev->nr_zones_closed--;
break;
case BLK_ZONE_COND_FULL:
default:
- ret = BLK_STS_IOERR;
- goto unlock;
+ return BLK_STS_IOERR;
}
- zone->cond = BLK_ZONE_COND_EXP_OPEN;
- dev->nr_zones_exp_open++;
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
-unlock:
- null_unlock_zone_res(dev);
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ return ret;
+ }
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ return ret;
+ }
+ dev->nr_zones_closed--;
+ break;
+ default:
+ break;
+ }
- return ret;
+ dev->nr_zones_exp_open++;
+
+ spin_unlock(&dev->zone_res_lock);
+ }
+
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+
+ return BLK_STS_OK;
}
static blk_status_t null_close_zone(struct nullb_device *dev,
struct nullb_zone *zone)
{
- blk_status_t ret;
-
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
- null_lock_zone_res(dev);
- ret = __null_close_zone(dev, zone);
- null_unlock_zone_res(dev);
+ switch (zone->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ /* close operation on closed is not an error */
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ default:
+ return BLK_STS_IOERR;
+ }
+
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
- return ret;
+ switch (zone->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ default:
+ break;
+ }
+
+ if (zone->wp > zone->start)
+ dev->nr_zones_closed++;
+
+ spin_unlock(&dev->zone_res_lock);
+ }
+
+ if (zone->wp == zone->start)
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ else
+ zone->cond = BLK_ZONE_COND_CLOSED;
+
+ return BLK_STS_OK;
}
static blk_status_t null_finish_zone(struct nullb_device *dev,
@@ -512,41 +535,47 @@ static blk_status_t null_finish_zone(struct nullb_device *dev,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
- null_lock_zone_res(dev);
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
- switch (zone->cond) {
- case BLK_ZONE_COND_FULL:
- /* finish operation on full is not an error */
- goto unlock;
- case BLK_ZONE_COND_EMPTY:
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK)
- goto unlock;
- break;
- case BLK_ZONE_COND_IMP_OPEN:
- dev->nr_zones_imp_open--;
- break;
- case BLK_ZONE_COND_EXP_OPEN:
- dev->nr_zones_exp_open--;
- break;
- case BLK_ZONE_COND_CLOSED:
- ret = null_check_zone_resources(dev, zone);
- if (ret != BLK_STS_OK)
- goto unlock;
- dev->nr_zones_closed--;
- break;
- default:
- ret = BLK_STS_IOERR;
- goto unlock;
+ switch (zone->cond) {
+ case BLK_ZONE_COND_FULL:
+ /* Finish operation on full is not an error */
+ spin_unlock(&dev->zone_res_lock);
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ return ret;
+ }
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK) {
+ spin_unlock(&dev->zone_res_lock);
+ return ret;
+ }
+ dev->nr_zones_closed--;
+ break;
+ default:
+ spin_unlock(&dev->zone_res_lock);
+ return BLK_STS_IOERR;
+ }
+
+ spin_unlock(&dev->zone_res_lock);
}
zone->cond = BLK_ZONE_COND_FULL;
zone->wp = zone->start + zone->len;
-unlock:
- null_unlock_zone_res(dev);
-
- return ret;
+ return BLK_STS_OK;
}
static blk_status_t null_reset_zone(struct nullb_device *dev,
@@ -555,34 +584,33 @@ static blk_status_t null_reset_zone(struct nullb_device *dev,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return BLK_STS_IOERR;
- null_lock_zone_res(dev);
+ if (dev->need_zone_res_mgmt) {
+ spin_lock(&dev->zone_res_lock);
- switch (zone->cond) {
- case BLK_ZONE_COND_EMPTY:
- /* reset operation on empty is not an error */
- null_unlock_zone_res(dev);
- return BLK_STS_OK;
- case BLK_ZONE_COND_IMP_OPEN:
- dev->nr_zones_imp_open--;
- break;
- case BLK_ZONE_COND_EXP_OPEN:
- dev->nr_zones_exp_open--;
- break;
- case BLK_ZONE_COND_CLOSED:
- dev->nr_zones_closed--;
- break;
- case BLK_ZONE_COND_FULL:
- break;
- default:
- null_unlock_zone_res(dev);
- return BLK_STS_IOERR;
+ switch (zone->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ dev->nr_zones_closed--;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ break;
+ default:
+ spin_unlock(&dev->zone_res_lock);
+ return BLK_STS_IOERR;
+ }
+
+ spin_unlock(&dev->zone_res_lock);
}
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
- null_unlock_zone_res(dev);
-
if (dev->memory_backed)
return null_handle_discard(dev, zone->start, zone->len);
@@ -711,7 +739,7 @@ static void null_set_zone_cond(struct nullb_device *dev,
zone->cond != BLK_ZONE_COND_OFFLINE)
null_finish_zone(dev, zone);
zone->cond = cond;
- zone->wp = (sector_t)-1;
+ zone->wp = NULL_ZONE_INVALID_WP;
}
null_unlock_zone(dev, zone);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index bea3d5cf8a83..176657dce3e3 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -221,7 +221,7 @@ static int ublk_get_nr_zones(const struct ublk_device *ub)
static int ublk_revalidate_disk_zones(struct ublk_device *ub)
{
- return blk_revalidate_disk_zones(ub->ub_disk, NULL);
+ return blk_revalidate_disk_zones(ub->ub_disk);
}
static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
@@ -249,8 +249,7 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
- blk_queue_required_elevator_features(ub->ub_disk->queue,
- ELEVATOR_F_ZBD_SEQ_WRITE);
+
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
}
@@ -2177,7 +2176,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
.max_hw_sectors = p->max_sectors,
.chunk_sectors = p->chunk_sectors,
.virt_boundary_mask = p->virt_boundary_mask,
-
+ .max_segments = USHRT_MAX,
+ .max_segment_size = UINT_MAX,
};
struct gendisk *disk;
int ret = -EINVAL;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 42dea7601d87..c1af0a7d56c8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1543,7 +1543,7 @@ static int virtblk_probe(struct virtio_device *vdev)
*/
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
- err = blk_revalidate_disk_zones(vblk->disk, NULL);
+ err = blk_revalidate_disk_zones(vblk->disk);
if (err)
goto out_cleanup_disk;
}
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index ac8ebccd3507..812fd2a8f853 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -380,8 +380,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
break;
+ }
data->cd_info.cnt = 0;
/* It is supposed coredump can be done within 5 seconds */
@@ -407,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
- if (err < 0)
- kfree_skb(skb);
-
return err;
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 19cfc342fc7b..638074992c82 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -99,7 +99,8 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
{
struct sk_buff *skb;
struct edl_event_hdr *edl;
- char cmd, build_label[QCA_FW_BUILD_VER_LEN];
+ char *build_label;
+ char cmd;
int build_lbl_len, err = 0;
bt_dev_dbg(hdev, "QCA read fw build info");
@@ -114,6 +115,11 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
return err;
}
+ if (skb->len < sizeof(*edl)) {
+ err = -EILSEQ;
+ goto out;
+ }
+
edl = (struct edl_event_hdr *)(skb->data);
if (!edl) {
bt_dev_err(hdev, "QCA read fw build info with no header");
@@ -129,14 +135,25 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
goto out;
}
+ if (skb->len < sizeof(*edl) + 1) {
+ err = -EILSEQ;
+ goto out;
+ }
+
build_lbl_len = edl->data[0];
- if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) {
- memcpy(build_label, edl->data + 1, build_lbl_len);
- *(build_label + build_lbl_len) = '\0';
+
+ if (skb->len < sizeof(*edl) + 1 + build_lbl_len) {
+ err = -EILSEQ;
+ goto out;
}
+ build_label = kstrndup(&edl->data[1], build_lbl_len, GFP_KERNEL);
+ if (!build_label) {
+ err = -ENOMEM;
+ goto out;
+ }
+
hci_set_fw_info(hdev, "%s", build_label);
+ kfree(build_label);
out:
kfree_skb(skb);
return err;
@@ -235,6 +252,11 @@ static int qca_read_fw_board_id(struct hci_dev *hdev, u16 *bid)
goto out;
}
+ if (skb->len < 3) {
+ err = -EILSEQ;
+ goto out;
+ }
+
*bid = (edl->data[1] << 8) + edl->data[2];
bt_dev_dbg(hdev, "%s: bid = %x", __func__, *bid);
@@ -265,9 +287,10 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
-static void qca_tlv_check_data(struct hci_dev *hdev,
+static int qca_tlv_check_data(struct hci_dev *hdev,
struct qca_fw_config *config,
- u8 *fw_data, enum qca_btsoc_type soc_type)
+ u8 *fw_data, size_t fw_size,
+ enum qca_btsoc_type soc_type)
{
const u8 *data;
u32 type_len;
@@ -277,12 +300,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
struct tlv_type_patch *tlv_patch;
struct tlv_type_nvm *tlv_nvm;
uint8_t nvm_baud_rate = config->user_baud_rate;
+ u8 type;
config->dnld_mode = QCA_SKIP_EVT_NONE;
config->dnld_type = QCA_SKIP_EVT_NONE;
switch (config->type) {
case ELF_TYPE_PATCH:
+ if (fw_size < 7)
+ return -EINVAL;
+
config->dnld_mode = QCA_SKIP_EVT_VSE_CC;
config->dnld_type = QCA_SKIP_EVT_VSE_CC;
@@ -291,6 +318,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
bt_dev_dbg(hdev, "File version : 0x%x", fw_data[6]);
break;
case TLV_TYPE_PATCH:
+ if (fw_size < sizeof(struct tlv_type_hdr) + sizeof(struct tlv_type_patch))
+ return -EINVAL;
+
tlv = (struct tlv_type_hdr *)fw_data;
type_len = le32_to_cpu(tlv->type_len);
tlv_patch = (struct tlv_type_patch *)tlv->data;
@@ -330,25 +360,64 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
break;
case TLV_TYPE_NVM:
+ if (fw_size < sizeof(struct tlv_type_hdr))
+ return -EINVAL;
+
tlv = (struct tlv_type_hdr *)fw_data;
type_len = le32_to_cpu(tlv->type_len);
- length = (type_len >> 8) & 0x00ffffff;
+ length = type_len >> 8;
+ type = type_len & 0xff;
- BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
+ /* Some NVM files have more than one set of tags, only parse
+ * the first set when it has type 2 for now. When there is
+ * more than one set there is an enclosing header of type 4.
+ */
+ if (type == 4) {
+ if (fw_size < 2 * sizeof(struct tlv_type_hdr))
+ return -EINVAL;
+
+ tlv++;
+
+ type_len = le32_to_cpu(tlv->type_len);
+ length = type_len >> 8;
+ type = type_len & 0xff;
+ }
+
+ BT_DBG("TLV Type\t\t : 0x%x", type);
BT_DBG("Length\t\t : %d bytes", length);
+ if (type != 2)
+ break;
+
+ if (fw_size < length + (tlv->data - fw_data))
+ return -EINVAL;
+
idx = 0;
data = tlv->data;
- while (idx < length) {
+ while (idx < length - sizeof(struct tlv_type_nvm)) {
tlv_nvm = (struct tlv_type_nvm *)(data + idx);
tag_id = le16_to_cpu(tlv_nvm->tag_id);
tag_len = le16_to_cpu(tlv_nvm->tag_len);
+ if (length < idx + sizeof(struct tlv_type_nvm) + tag_len)
+ return -EINVAL;
+
/* Update NVM tags as needed */
switch (tag_id) {
+ case EDL_TAG_ID_BD_ADDR:
+ if (tag_len != sizeof(bdaddr_t))
+ return -EINVAL;
+
+ memcpy(&config->bdaddr, tlv_nvm->data, sizeof(bdaddr_t));
+
+ break;
+
case EDL_TAG_ID_HCI:
+ if (tag_len < 3)
+ return -EINVAL;
+
/* HCI transport layer parameters
* enabling software inband sleep
* onto controller side.
@@ -364,6 +433,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
break;
case EDL_TAG_ID_DEEP_SLEEP:
+ if (tag_len < 1)
+ return -EINVAL;
+
/* Sleep enable mask
* enabling deep sleep feature on controller.
*/
@@ -372,14 +444,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev,
break;
}
- idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
+ idx += sizeof(struct tlv_type_nvm) + tag_len;
}
break;
default:
BT_ERR("Unknown TLV type %d", config->type);
- break;
+ return -EINVAL;
}
+
+ return 0;
}
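
[Editor's note: the 32-bit type_len word packs an 8-bit type in the low byte and a 24-bit length above it. A self-contained sketch of the decode, including the type-4 outer header the comment describes; field names follow the driver's structs:]

	struct tlv_type_hdr *tlv = (struct tlv_type_hdr *)fw_data;
	u32 type_len = le32_to_cpu(tlv->type_len);
	u32 length = type_len >> 8;		/* upper 24 bits */
	u8 type = type_len & 0xff;		/* lower 8 bits  */

	if (type == 4) {	/* outer header: step to the inner TLV */
		tlv++;
		type_len = le32_to_cpu(tlv->type_len);
		length = type_len >> 8;
		type = type_len & 0xff;
	}
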
static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
@@ -529,7 +603,9 @@ static int qca_download_firmware(struct hci_dev *hdev,
memcpy(data, fw->data, size);
release_firmware(fw);
- qca_tlv_check_data(hdev, config, data, soc_type);
+ ret = qca_tlv_check_data(hdev, config, data, size, soc_type);
+ if (ret)
+ goto out;
segment = data;
remain = size;
@@ -612,6 +688,38 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *config)
+{
+ struct hci_rp_read_bd_addr *bda;
+ struct sk_buff *skb;
+ int err;
+
+ if (bacmp(&hdev->public_addr, BDADDR_ANY))
+ return 0;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to read device address (%d)", err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*bda)) {
+ bt_dev_err(hdev, "Device address length mismatch");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ bda = (struct hci_rp_read_bd_addr *)skb->data;
+ if (!bacmp(&bda->bdaddr, &config->bdaddr))
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
@@ -633,7 +741,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
const char *firmware_name)
{
- struct qca_fw_config config;
+ struct qca_fw_config config = {};
int err;
u8 rom_ver = 0;
u32 soc_ver;
@@ -818,6 +926,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
break;
}
+ err = qca_check_bdaddr(hdev, &config);
+ if (err)
+ return err;
+
bt_dev_info(hdev, "QCA setup on UART is completed");
return 0;
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index dc31984f71dc..215433fd76a1 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -29,6 +29,7 @@
#define EDL_PATCH_CONFIG_RES_EVT (0x00)
#define QCA_DISABLE_LOGGING_SUB_OP (0x14)
+#define EDL_TAG_ID_BD_ADDR (2)
#define EDL_TAG_ID_HCI (17)
#define EDL_TAG_ID_DEEP_SLEEP (27)
@@ -47,7 +48,6 @@
#define get_soc_ver(soc_id, rom_ver) \
((le32_to_cpu(soc_id) << 16) | (le16_to_cpu(rom_ver)))
-#define QCA_FW_BUILD_VER_LEN 255
#define QCA_HSP_GF_SOC_ID 0x1200
#define QCA_HSP_GF_SOC_MASK 0x0000ff00
@@ -94,6 +94,7 @@ struct qca_fw_config {
uint8_t user_baud_rate;
enum qca_tlv_dnld_mode dnld_mode;
enum qca_tlv_dnld_mode dnld_type;
+ bdaddr_t bdaddr;
};
struct edl_event_hdr {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 06e915b57283..e3946f7b736e 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -542,6 +542,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
@@ -3480,13 +3482,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_coredump_qca(struct hci_dev *hdev)
{
+ int err;
static const u8 param[] = { 0x26 };
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
- kfree_skb(skb);
+ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+ if (err < 0)
+ bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
/*
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index ecbc52eaf101..0c9c9ee56592 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1672,6 +1672,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
+ if (!hu->serdev)
+ return true;
+
/* BT SoC attached through the serial bus is handled by the serdev
 * driver. So we need to use the device handle of the serdev driver
 * to query whether the device may wake up.
@@ -1905,8 +1908,6 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bdaddr_property_broken)
set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
@@ -1957,8 +1958,10 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
- if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
- hu->hdev->wakeup = qca_wakeup;
+ if (hu->serdev) {
+ if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
+ hu->hdev->wakeup = qca_wakeup;
+ }
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
@@ -2329,16 +2332,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- power_ctrl_enabled = false;
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
- data->soc_type == QCA_WCN7850))
- dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ data->soc_type == QCA_WCN7850)) {
+ dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ return PTR_ERR(qcadev->sw_ctrl);
+ }
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
@@ -2357,10 +2365,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(qcadev->bt_en)) {
- dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
- power_ctrl_enabled = false;
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d5e7fa9173a1..64cd2ee03aa3 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -163,6 +163,16 @@ config QCOM_SSC_BLOCK_BUS
i2c/spi/uart controllers, a hexagon core, and a clock controller
which provides clocks for the above.
+config STM32_FIREWALL
+ bool "STM32 Firewall framework"
+ depends on (ARCH_STM32 || COMPILE_TEST) && OF
+ select OF_DYNAMIC
+ help
+ Say Y to enable the STM32 firewall framework and its services.
+ Firewall controllers can register with the framework, and access
+ to hardware resources behind a firewall controller can then be
+ requested through it.
+
config SUN50I_DE2_BUS
bool "Allwinner A64 DE2 Bus Driver"
default ARM64
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index d90eed189a65..cddd4984d6af 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_QCOM_SSC_BLOCK_BUS) += qcom-ssc-block-bus.o
+obj-$(CONFIG_STM32_FIREWALL) += stm32_firewall.o stm32_rifsc.o stm32_etzpc.o
obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_OF) += simple-pm-bus.o
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 65ae758f3194..ee29162da4ee 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -410,6 +410,7 @@ static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,bcm74165-gisb-arb", .data = gisb_offsets_bcm74165 },
{ },
};
+MODULE_DEVICE_TABLE(of, brcmstb_gisb_arb_of_match);
static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
{
diff --git a/drivers/bus/stm32_etzpc.c b/drivers/bus/stm32_etzpc.c
new file mode 100644
index 000000000000..7fc0f16960be
--- /dev/null
+++ b/drivers/bus/stm32_etzpc.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "stm32_firewall.h"
+
+/*
+ * ETZPC registers
+ */
+#define ETZPC_DECPROT 0x10
+#define ETZPC_HWCFGR 0x3F0
+
+/*
+ * HWCFGR register
+ */
+#define ETZPC_HWCFGR_NUM_TZMA GENMASK(7, 0)
+#define ETZPC_HWCFGR_NUM_PER_SEC GENMASK(15, 8)
+#define ETZPC_HWCFGR_NUM_AHB_SEC GENMASK(23, 16)
+#define ETZPC_HWCFGR_CHUNKS1N4 GENMASK(31, 24)
+
+/*
+ * ETZPC miscellaneous
+ */
+#define ETZPC_PROT_MASK GENMASK(1, 0)
+#define ETZPC_PROT_A7NS 0x3
+#define ETZPC_DECPROT_SHIFT 1
+
+#define IDS_PER_DECPROT_REGS 16
+
+static int stm32_etzpc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id)
+{
+ u32 offset, reg_offset, sec_val;
+
+ if (firewall_id >= ctrl->max_entries) {
+ dev_err(ctrl->dev, "Invalid sys bus ID %u", firewall_id);
+ return -EINVAL;
+ }
+
+ /* Check access configuration, 16 peripherals per register */
+ reg_offset = ETZPC_DECPROT + 0x4 * (firewall_id / IDS_PER_DECPROT_REGS);
+ offset = (firewall_id % IDS_PER_DECPROT_REGS) << ETZPC_DECPROT_SHIFT;
+
+ /* Verify the peripheral is non-secure and attributed to the Cortex-A7 */
+ sec_val = (readl(ctrl->mmio + reg_offset) >> offset) & ETZPC_PROT_MASK;
+ if (sec_val != ETZPC_PROT_A7NS) {
+ dev_dbg(ctrl->dev, "Invalid bus configuration: reg_offset %#x, value %d\n",
+ reg_offset, sec_val);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static void stm32_etzpc_release_access(struct stm32_firewall_controller *ctrl __maybe_unused,
+ u32 firewall_id __maybe_unused)
+{
+}
+
+static int stm32_etzpc_probe(struct platform_device *pdev)
+{
+ struct stm32_firewall_controller *etzpc_controller;
+ struct device_node *np = pdev->dev.of_node;
+ u32 nb_per, nb_master;
+ struct resource *res;
+ void __iomem *mmio;
+ int rc;
+
+ etzpc_controller = devm_kzalloc(&pdev->dev, sizeof(*etzpc_controller), GFP_KERNEL);
+ if (!etzpc_controller)
+ return -ENOMEM;
+
+ mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ etzpc_controller->dev = &pdev->dev;
+ etzpc_controller->mmio = mmio;
+ etzpc_controller->name = dev_driver_string(etzpc_controller->dev);
+ etzpc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL;
+ etzpc_controller->grant_access = stm32_etzpc_grant_access;
+ etzpc_controller->release_access = stm32_etzpc_release_access;
+
+ /* Get number of ETZPC entries */
+ nb_per = FIELD_GET(ETZPC_HWCFGR_NUM_PER_SEC,
+ readl(etzpc_controller->mmio + ETZPC_HWCFGR));
+ nb_master = FIELD_GET(ETZPC_HWCFGR_NUM_AHB_SEC,
+ readl(etzpc_controller->mmio + ETZPC_HWCFGR));
+ etzpc_controller->max_entries = nb_per + nb_master;
+
+ platform_set_drvdata(pdev, etzpc_controller);
+
+ rc = stm32_firewall_controller_register(etzpc_controller);
+ if (rc) {
+ dev_err(etzpc_controller->dev, "Couldn't register as a firewall controller: %d",
+ rc);
+ return rc;
+ }
+
+ rc = stm32_firewall_populate_bus(etzpc_controller);
+ if (rc) {
+ dev_err(etzpc_controller->dev, "Couldn't populate ETZPC bus: %d",
+ rc);
+ return rc;
+ }
+
+ /* Populate all allowed nodes */
+ return of_platform_populate(np, NULL, NULL, &pdev->dev);
+}
+
+static const struct of_device_id stm32_etzpc_of_match[] = {
+ { .compatible = "st,stm32-etzpc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_etzpc_of_match);
+
+static struct platform_driver stm32_etzpc_driver = {
+ .probe = stm32_etzpc_probe,
+ .driver = {
+ .name = "stm32-etzpc",
+ .of_match_table = stm32_etzpc_of_match,
+ },
+};
+module_platform_driver(stm32_etzpc_driver);
+
+MODULE_AUTHOR("Gatien Chevallier <gatien.chevallier@foss.st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ETZPC driver");
+MODULE_LICENSE("GPL");
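
As a worked example of the DECPROT arithmetic above (illustrative ID;
"base" stands in for ctrl->mmio): firewall ID 35 lands in the third
DECPROT word, bit field [7:6].

u32 id = 35;                                                    /* illustrative */
u32 reg = ETZPC_DECPROT + 0x4 * (id / IDS_PER_DECPROT_REGS);    /* 0x10 + 0x8 = 0x18 */
u32 shift = (id % IDS_PER_DECPROT_REGS) << ETZPC_DECPROT_SHIFT; /* 3 << 1 = 6 */
u32 val = (readl(base + reg) >> shift) & ETZPC_PROT_MASK;       /* 0x3 (A7NS) grants access */
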
diff --git a/drivers/bus/stm32_firewall.c b/drivers/bus/stm32_firewall.c
new file mode 100644
index 000000000000..2fc9761dadec
--- /dev/null
+++ b/drivers/bus/stm32_firewall.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/bus/stm32_firewall_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "stm32_firewall.h"
+
+/* Corresponds to STM32_FIREWALL_MAX_EXTRA_ARGS + firewall ID */
+#define STM32_FIREWALL_MAX_ARGS (STM32_FIREWALL_MAX_EXTRA_ARGS + 1)
+
+static LIST_HEAD(firewall_controller_list);
+static DEFINE_MUTEX(firewall_controller_list_lock);
+
+/* Firewall device API */
+
+int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+ unsigned int nb_firewall)
+{
+ struct stm32_firewall_controller *ctrl;
+ struct of_phandle_iterator it;
+ unsigned int i, j = 0;
+ int err;
+
+ if (!firewall || !nb_firewall)
+ return -EINVAL;
+
+ /* Parse property with phandle parsed out */
+ of_for_each_phandle(&it, err, np, "access-controllers", "#access-controller-cells", 0) {
+ struct of_phandle_args provider_args;
+ struct device_node *provider = it.node;
+ const char *fw_entry;
+ bool match = false;
+
+ if (err) {
+ pr_err("Unable to get access-controllers property for node %s\n, err: %d",
+ np->full_name, err);
+ of_node_put(provider);
+ return err;
+ }
+
+ if (j >= nb_firewall) {
+ pr_err("Too many firewall controllers");
+ of_node_put(provider);
+ return -EINVAL;
+ }
+
+ provider_args.args_count = of_phandle_iterator_args(&it, provider_args.args,
+ STM32_FIREWALL_MAX_ARGS);
+
+ /* Check if the parsed phandle corresponds to a registered firewall controller */
+ mutex_lock(&firewall_controller_list_lock);
+ list_for_each_entry(ctrl, &firewall_controller_list, entry) {
+ if (ctrl->dev->of_node->phandle == it.phandle) {
+ match = true;
+ firewall[j].firewall_ctrl = ctrl;
+ break;
+ }
+ }
+ mutex_unlock(&firewall_controller_list_lock);
+
+ if (!match) {
+ firewall[j].firewall_ctrl = NULL;
+ pr_err("No firewall controller registered for %s\n", np->full_name);
+ of_node_put(provider);
+ return -ENODEV;
+ }
+
+ err = of_property_read_string_index(np, "access-controller-names", j, &fw_entry);
+ if (err == 0)
+ firewall[j].entry = fw_entry;
+
+ /* Handle the case when there are no arguments given along with the phandle */
+ if (provider_args.args_count < 0 ||
+ provider_args.args_count > STM32_FIREWALL_MAX_ARGS) {
+ of_node_put(provider);
+ return -EINVAL;
+ } else if (provider_args.args_count == 0) {
+ firewall[j].extra_args_size = 0;
+ firewall[j].firewall_id = U32_MAX;
+ j++;
+ continue;
+ }
+
+ /* The firewall ID is always the first argument */
+ firewall[j].firewall_id = provider_args.args[0];
+
+ /* Extra args start at the second argument */
+ for (i = 0; i < provider_args.args_count - 1; i++)
+ firewall[j].extra_args[i] = provider_args.args[i + 1];
+
+ /* Remove the firewall ID arg that is not an extra argument */
+ firewall[j].extra_args_size = provider_args.args_count - 1;
+
+ j++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_get_firewall);
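
A minimal consumer sketch, assuming a device node that carries an
"access-controllers = <&ctrl ID>;" phandle and the public header
linux/bus/stm32_firewall_device.h; the probe function name is
hypothetical:

static int my_periph_probe(struct platform_device *pdev)
{
	struct stm32_firewall fw;
	int err;

	err = stm32_firewall_get_firewall(pdev->dev.of_node, &fw, 1);
	if (err)
		return err;

	err = stm32_firewall_grant_access(&fw);
	if (err)
		return err;     /* peripheral is not accessible to Linux */

	/* ... normal peripheral setup; on teardown: */
	stm32_firewall_release_access(&fw);
	return 0;
}
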
+
+int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || firewall->firewall_id == U32_MAX)
+ return -EINVAL;
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller)
+ return -ENODEV;
+
+ return firewall_controller->grant_access(firewall_controller, firewall->firewall_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_grant_access);
+
+int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX)
+ return -EINVAL;
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller)
+ return -ENODEV;
+
+ return firewall_controller->grant_access(firewall_controller, subsystem_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_grant_access_by_id);
+
+void stm32_firewall_release_access(struct stm32_firewall *firewall)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || firewall->firewall_id == U32_MAX) {
+ pr_debug("Incorrect arguments when releasing a firewall access\n");
+ return;
+ }
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller) {
+ pr_debug("No firewall controller to release\n");
+ return;
+ }
+
+ firewall_controller->release_access(firewall_controller, firewall->firewall_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_release_access);
+
+void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX) {
+ pr_debug("Incorrect arguments when releasing a firewall access");
+ return;
+ }
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller) {
+ pr_debug("No firewall controller to release");
+ return;
+ }
+
+ firewall_controller->release_access(firewall_controller, subsystem_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_release_access_by_id);
+
+/* Firewall controller API */
+
+int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller)
+{
+ struct stm32_firewall_controller *ctrl;
+
+ if (!firewall_controller)
+ return -ENODEV;
+
+ pr_info("Registering %s firewall controller\n", firewall_controller->name);
+
+ mutex_lock(&firewall_controller_list_lock);
+ list_for_each_entry(ctrl, &firewall_controller_list, entry) {
+ if (ctrl == firewall_controller) {
+ pr_debug("%s firewall controller already registered\n",
+ firewall_controller->name);
+ mutex_unlock(&firewall_controller_list_lock);
+ return 0;
+ }
+ }
+ list_add_tail(&firewall_controller->entry, &firewall_controller_list);
+ mutex_unlock(&firewall_controller_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_controller_register);
+
+void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller)
+{
+ struct stm32_firewall_controller *ctrl;
+ bool controller_removed = false;
+
+ if (!firewall_controller) {
+ pr_debug("Null reference while unregistering firewall controller\n");
+ return;
+ }
+
+ mutex_lock(&firewall_controller_list_lock);
+ list_for_each_entry(ctrl, &firewall_controller_list, entry) {
+ if (ctrl == firewall_controller) {
+ controller_removed = true;
+ list_del_init(&ctrl->entry);
+ break;
+ }
+ }
+ mutex_unlock(&firewall_controller_list_lock);
+
+ if (!controller_removed)
+ pr_debug("There was no firewall controller named %s to unregister\n",
+ firewall_controller->name);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_controller_unregister);
+
+int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller)
+{
+ struct stm32_firewall *firewalls;
+ struct device_node *child;
+ struct device *parent;
+ unsigned int i;
+ int len;
+ int err;
+
+ parent = firewall_controller->dev;
+
+ dev_dbg(parent, "Populating %s system bus\n", dev_name(firewall_controller->dev));
+
+ for_each_available_child_of_node(dev_of_node(parent), child) {
+ /* The access-controllers property is mandatory for firewall bus devices */
+ len = of_count_phandle_with_args(child, "access-controllers",
+ "#access-controller-cells");
+ if (len <= 0) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ firewalls = kcalloc(len, sizeof(*firewalls), GFP_KERNEL);
+ if (!firewalls) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ err = stm32_firewall_get_firewall(child, firewalls, (unsigned int)len);
+ if (err) {
+ kfree(firewalls);
+ of_node_put(child);
+ return err;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (firewall_controller->grant_access(firewall_controller,
+ firewalls[i].firewall_id)) {
+ /*
+ * Peripheral access not allowed or not defined.
+ * Mark the node as populated so the platform bus won't probe it.
+ */
+ of_detach_node(child);
+ dev_err(parent, "%s: Device driver will not be probed\n",
+ child->full_name);
+ }
+ }
+
+ kfree(firewalls);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_populate_bus);
diff --git a/drivers/bus/stm32_firewall.h b/drivers/bus/stm32_firewall.h
new file mode 100644
index 000000000000..e5fac85fe346
--- /dev/null
+++ b/drivers/bus/stm32_firewall.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef _STM32_FIREWALL_H
+#define _STM32_FIREWALL_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/**
+ * STM32_PERIPHERAL_FIREWALL: This type of firewall protects peripherals
+ * STM32_MEMORY_FIREWALL: This type of firewall protects memories or
+ * subsets of memory zones
+ * STM32_NOTYPE_FIREWALL: Undefined firewall type
+ */
+
+#define STM32_PERIPHERAL_FIREWALL BIT(1)
+#define STM32_MEMORY_FIREWALL BIT(2)
+#define STM32_NOTYPE_FIREWALL BIT(3)
+
+/**
+ * struct stm32_firewall_controller - Information on firewall controller supplying services
+ *
+ * @name: Name of the firewall controller
+ * @dev: Device reference of the firewall controller
+ * @mmio: Base address of the firewall controller
+ * @entry: List entry of the firewall controller list
+ * @type: Type of firewall
+ * @max_entries: Number of entries covered by the firewall
+ * @grant_access: Callback used to grant access for a device access against a
+ * firewall controller
+ * @release_access: Callback used to release resources taken by a device when access was
+ * granted
+ * @grant_memory_range_access: Callback used to grant access for a device to a given memory region
+ */
+struct stm32_firewall_controller {
+ const char *name;
+ struct device *dev;
+ void __iomem *mmio;
+ struct list_head entry;
+ unsigned int type;
+ unsigned int max_entries;
+
+ int (*grant_access)(struct stm32_firewall_controller *ctrl, u32 id);
+ void (*release_access)(struct stm32_firewall_controller *ctrl, u32 id);
+ int (*grant_memory_range_access)(struct stm32_firewall_controller *ctrl, phys_addr_t paddr,
+ size_t size);
+};
+
+/**
+ * stm32_firewall_controller_register - Register a firewall controller to the STM32 firewall
+ * framework
+ * @firewall_controller: Firewall controller to register
+ *
+ * Returns 0 in case of success or -ENODEV if no controller was given.
+ */
+int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller);
+
+/**
+ * stm32_firewall_controller_unregister - Unregister a firewall controller from the STM32
+ * firewall framework
+ * @firewall_controller: Firewall controller to unregister
+ */
+void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller);
+
+/**
+ * stm32_firewall_populate_bus - Populate device tree nodes that have a correct firewall
+ * configuration. This is used at boot-time only, as a sanity check
+ * between the device tree and the firewall hardware configuration to
+ * prevent a kernel crash when a device driver is not granted access
+ *
+ * @firewall_controller: Firewall controller which nodes will be populated or not
+ *
+ * Returns 0 in case of success or appropriate errno code if error occurred.
+ */
+int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller);
+
+#endif /* _STM32_FIREWALL_H */
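
On the provider side, a controller driver fills in the callbacks and
registers itself. A minimal sketch under assumed hardware; the names
and the fixed max_entries value are illustrative, not a real driver:

static int my_fw_grant_access(struct stm32_firewall_controller *ctrl, u32 id)
{
	/* Consult hardware; 0 means the non-secure world may use 'id'. */
	return id < ctrl->max_entries ? 0 : -EACCES;
}

static void my_fw_release_access(struct stm32_firewall_controller *ctrl, u32 id)
{
}

static int my_fw_probe(struct platform_device *pdev)
{
	struct stm32_firewall_controller *c;

	c = devm_kzalloc(&pdev->dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->dev = &pdev->dev;
	c->name = dev_driver_string(&pdev->dev);
	c->type = STM32_PERIPHERAL_FIREWALL;
	c->max_entries = 16;    /* assumption for the sketch */
	c->grant_access = my_fw_grant_access;
	c->release_access = my_fw_release_access;

	return stm32_firewall_controller_register(c);
}
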
diff --git a/drivers/bus/stm32_rifsc.c b/drivers/bus/stm32_rifsc.c
new file mode 100644
index 000000000000..4cf1b60014b7
--- /dev/null
+++ b/drivers/bus/stm32_rifsc.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "stm32_firewall.h"
+
+/*
+ * RIFSC offset register
+ */
+#define RIFSC_RISC_SECCFGR0 0x10
+#define RIFSC_RISC_PRIVCFGR0 0x30
+#define RIFSC_RISC_PER0_CIDCFGR 0x100
+#define RIFSC_RISC_PER0_SEMCR 0x104
+#define RIFSC_RISC_HWCFGR2 0xFEC
+
+/*
+ * SEMCR register
+ */
+#define SEMCR_MUTEX BIT(0)
+
+/*
+ * HWCFGR2 register
+ */
+#define HWCFGR2_CONF1_MASK GENMASK(15, 0)
+#define HWCFGR2_CONF2_MASK GENMASK(23, 16)
+#define HWCFGR2_CONF3_MASK GENMASK(31, 24)
+
+/*
+ * RIFSC miscellaneous
+ */
+#define RIFSC_RISC_CFEN_MASK BIT(0)
+#define RIFSC_RISC_SEM_EN_MASK BIT(1)
+#define RIFSC_RISC_SCID_MASK GENMASK(6, 4)
+#define RIFSC_RISC_SEML_SHIFT 16
+#define RIFSC_RISC_SEMWL_MASK GENMASK(23, 16)
+#define RIFSC_RISC_PER_ID_MASK GENMASK(31, 24)
+
+#define RIFSC_RISC_PERx_CID_MASK (RIFSC_RISC_CFEN_MASK | \
+ RIFSC_RISC_SEM_EN_MASK | \
+ RIFSC_RISC_SCID_MASK | \
+ RIFSC_RISC_SEMWL_MASK)
+
+#define IDS_PER_RISC_SEC_PRIV_REGS 32
+
+/* RIF miscellaneous */
+/*
+ * CIDCFGR register fields
+ */
+#define CIDCFGR_CFEN BIT(0)
+#define CIDCFGR_SEMEN BIT(1)
+#define CIDCFGR_SEMWL(x) BIT(RIFSC_RISC_SEML_SHIFT + (x))
+
+#define SEMWL_SHIFT 16
+
+/* Compartment IDs */
+#define RIF_CID0 0x0
+#define RIF_CID1 0x1
+
+static bool stm32_rifsc_is_semaphore_available(void __iomem *addr)
+{
+ return !(readl(addr) & SEMCR_MUTEX);
+}
+
+static int stm32_rif_acquire_semaphore(struct stm32_firewall_controller *stm32_firewall_controller,
+ int id)
+{
+ void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id;
+
+ writel(SEMCR_MUTEX, addr);
+
+ /* Check that CID1 has the semaphore */
+ if (stm32_rifsc_is_semaphore_available(addr) ||
+ FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) != RIF_CID1)
+ return -EACCES;
+
+ return 0;
+}
+
+static void stm32_rif_release_semaphore(struct stm32_firewall_controller *stm32_firewall_controller,
+ int id)
+{
+ void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id;
+
+ if (stm32_rifsc_is_semaphore_available(addr))
+ return;
+
+ writel(SEMCR_MUTEX, addr);
+
+ /* Ok if another compartment takes the semaphore before the check */
+ WARN_ON(!stm32_rifsc_is_semaphore_available(addr) &&
+ FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) == RIF_CID1);
+}
+
+static int stm32_rifsc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id)
+{
+ struct stm32_firewall_controller *rifsc_controller = ctrl;
+ u32 reg_offset, reg_id, sec_reg_value, cid_reg_value;
+ int rc;
+
+ if (firewall_id >= rifsc_controller->max_entries) {
+ dev_err(rifsc_controller->dev, "Invalid sys bus ID %u", firewall_id);
+ return -EINVAL;
+ }
+
+ /*
+ * RIFSC_RISC_PRIVCFGRx and RIFSC_RISC_SECCFGRx both handle configuration
+ * access for 32 peripherals per register. There is, however, one
+ * RIFSC_RISC_PERx_CIDCFGR register per peripheral.
+ */
+ reg_id = firewall_id / IDS_PER_RISC_SEC_PRIV_REGS;
+ reg_offset = firewall_id % IDS_PER_RISC_SEC_PRIV_REGS;
+ sec_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id);
+ cid_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * firewall_id);
+
+ /* First check conditions for semaphore mode, which doesn't take into account static CID. */
+ if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) {
+ if (cid_reg_value & BIT(RIF_CID1 + SEMWL_SHIFT)) {
+ /* Static CID is irrelevant if semaphore mode */
+ goto skip_cid_check;
+ } else {
+ dev_dbg(rifsc_controller->dev,
+ "Invalid bus semaphore configuration: index %d\n", firewall_id);
+ return -EACCES;
+ }
+ }
+
+ /*
+ * Skip the CID check if CID filtering isn't enabled, or if it is
+ * enabled on CID0, which matches any CID.
+ */
+ if (!(cid_reg_value & CIDCFGR_CFEN) ||
+ FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) == RIF_CID0)
+ goto skip_cid_check;
+
+ /* Coherency check with the CID configuration */
+ if (FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) != RIF_CID1) {
+ dev_dbg(rifsc_controller->dev, "Invalid CID configuration for peripheral: %d\n",
+ firewall_id);
+ return -EACCES;
+ }
+
+skip_cid_check:
+ /* Check security configuration */
+ if (sec_reg_value & BIT(reg_offset)) {
+ dev_dbg(rifsc_controller->dev,
+ "Invalid security configuration for peripheral: %d\n", firewall_id);
+ return -EACCES;
+ }
+
+ /*
+ * If the peripheral is in semaphore mode, take the semaphore so that
+ * CID1 owns it.
+ */
+ if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) {
+ rc = stm32_rif_acquire_semaphore(rifsc_controller, firewall_id);
+ if (rc) {
+ dev_err(rifsc_controller->dev,
+ "Couldn't acquire semaphore for peripheral: %d\n", firewall_id);
+ return rc;
+ }
+ }
+
+ return 0;
+}
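
A worked decode of the branches above, with an illustrative CIDCFGR
value:

u32 cid = 0x00020003;   /* illustrative register content */
/* BIT(0)  CFEN  = 1 -> CID filtering enabled
 * BIT(1)  SEMEN = 1 -> semaphore mode
 * BIT(17)       = 1 -> CID1 is on the semaphore whitelist (SEMWL) */
bool sem_mode = (cid & CIDCFGR_SEMEN) && (cid & CIDCFGR_CFEN);  /* true */
bool cid1_wl = cid & BIT(RIF_CID1 + SEMWL_SHIFT);               /* true */
/* -> the static-CID check is skipped and access is granted once
 *    stm32_rif_acquire_semaphore() takes the semaphore for CID1. */
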
+
+static void stm32_rifsc_release_access(struct stm32_firewall_controller *ctrl, u32 firewall_id)
+{
+ stm32_rif_release_semaphore(ctrl, firewall_id);
+}
+
+static int stm32_rifsc_probe(struct platform_device *pdev)
+{
+ struct stm32_firewall_controller *rifsc_controller;
+ struct device_node *np = pdev->dev.of_node;
+ u32 nb_risup, nb_rimu, nb_risal;
+ struct resource *res;
+ void __iomem *mmio;
+ int rc;
+
+ rifsc_controller = devm_kzalloc(&pdev->dev, sizeof(*rifsc_controller), GFP_KERNEL);
+ if (!rifsc_controller)
+ return -ENOMEM;
+
+ mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ rifsc_controller->dev = &pdev->dev;
+ rifsc_controller->mmio = mmio;
+ rifsc_controller->name = dev_driver_string(rifsc_controller->dev);
+ rifsc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL;
+ rifsc_controller->grant_access = stm32_rifsc_grant_access;
+ rifsc_controller->release_access = stm32_rifsc_release_access;
+
+ /* Get number of RIFSC entries */
+ nb_risup = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF1_MASK;
+ nb_rimu = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF2_MASK;
+ nb_risal = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF3_MASK;
+ rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal;
+
+ platform_set_drvdata(pdev, rifsc_controller);
+
+ rc = stm32_firewall_controller_register(rifsc_controller);
+ if (rc) {
+ dev_err(rifsc_controller->dev, "Couldn't register as a firewall controller: %d",
+ rc);
+ return rc;
+ }
+
+ rc = stm32_firewall_populate_bus(rifsc_controller);
+ if (rc) {
+ dev_err(rifsc_controller->dev, "Couldn't populate RIFSC bus: %d",
+ rc);
+ return rc;
+ }
+
+ /* Populate all allowed nodes */
+ return of_platform_populate(np, NULL, NULL, &pdev->dev);
+}
+
+static const struct of_device_id stm32_rifsc_of_match[] = {
+ { .compatible = "st,stm32mp25-rifsc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match);
+
+static struct platform_driver stm32_rifsc_driver = {
+ .probe = stm32_rifsc_probe,
+ .driver = {
+ .name = "stm32-rifsc",
+ .of_match_table = stm32_rifsc_of_match,
+ },
+};
+module_platform_driver(stm32_rifsc_driver);
+
+MODULE_AUTHOR("Gatien Chevallier <gatien.chevallier@foss.st.com>");
+MODULE_DESCRIPTION("STMicroelectronics RIFSC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 41d33f39efe5..8767e04d6c89 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1,6 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ti-sysc.c - Texas Instruments sysc interconnect target driver
+ *
+ * TI SoCs have an interconnect target wrapper IP for many devices. The wrapper
+ * IP manages clock gating, resets, and PM capabilities for the connected devices.
+ *
+ * Copyright (C) 2017-2024 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Many features are based on the earlier omap_hwmod arch code with thanks to all
+ * the people who developed and debugged the code over the years:
+ *
+ * Copyright (C) 2009-2011 Nokia Corporation
+ * Copyright (C) 2011-2021 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/io.h>
@@ -1458,8 +1469,7 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks &
- (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
return 0;
if (!ddata->enabled)
@@ -1477,8 +1487,7 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks &
- (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
return 0;
if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
@@ -1529,19 +1538,6 @@ struct sysc_revision_quirk {
}
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
- /* These drivers need to be fixed to not use pm_runtime_irq_safe() */
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- /* Uarts on omap4 and later */
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
-
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
@@ -1599,6 +1595,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ /* Uarts on omap4 and later */
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
@@ -2145,8 +2152,7 @@ static int sysc_reset(struct sysc *ddata)
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
if (ddata->legacy_mode ||
- ddata->cap->regbits->srst_shift < 0 ||
- ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ ddata->cap->regbits->srst_shift < 0)
return 0;
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
@@ -2240,12 +2246,14 @@ static int sysc_init_module(struct sysc *ddata)
goto err_main_clocks;
}
- error = sysc_reset(ddata);
- if (error)
- dev_err(ddata->dev, "Reset failed with %d\n", error);
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
- if (error && !ddata->legacy_mode)
- sysc_disable_module(ddata->dev);
+ if (error && !ddata->legacy_mode)
+ sysc_disable_module(ddata->dev);
+ }
err_main_clocks:
if (error)
@@ -2447,89 +2455,6 @@ static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
-#ifdef CONFIG_PM_SLEEP
-static int sysc_child_suspend_noirq(struct device *dev)
-{
- struct sysc *ddata;
- int error;
-
- ddata = sysc_child_to_parent(dev);
-
- dev_dbg(ddata->dev, "%s %s\n", __func__,
- ddata->name ? ddata->name : "");
-
- error = pm_generic_suspend_noirq(dev);
- if (error) {
- dev_err(dev, "%s error at %i: %i\n",
- __func__, __LINE__, error);
-
- return error;
- }
-
- if (!pm_runtime_status_suspended(dev)) {
- error = pm_generic_runtime_suspend(dev);
- if (error) {
- dev_dbg(dev, "%s busy at %i: %i\n",
- __func__, __LINE__, error);
-
- return 0;
- }
-
- error = sysc_runtime_suspend(ddata->dev);
- if (error) {
- dev_err(dev, "%s error at %i: %i\n",
- __func__, __LINE__, error);
-
- return error;
- }
-
- ddata->child_needs_resume = true;
- }
-
- return 0;
-}
-
-static int sysc_child_resume_noirq(struct device *dev)
-{
- struct sysc *ddata;
- int error;
-
- ddata = sysc_child_to_parent(dev);
-
- dev_dbg(ddata->dev, "%s %s\n", __func__,
- ddata->name ? ddata->name : "");
-
- if (ddata->child_needs_resume) {
- ddata->child_needs_resume = false;
-
- error = sysc_runtime_resume(ddata->dev);
- if (error)
- dev_err(ddata->dev,
- "%s runtime resume error: %i\n",
- __func__, error);
-
- error = pm_generic_runtime_resume(dev);
- if (error)
- dev_err(ddata->dev,
- "%s generic runtime resume: %i\n",
- __func__, error);
- }
-
- return pm_generic_resume_noirq(dev);
-}
-#endif
-
-static struct dev_pm_domain sysc_child_pm_domain = {
- .ops = {
- SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
- sysc_child_runtime_resume,
- NULL)
- USE_PLATFORM_PM_SLEEP_OPS
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
- sysc_child_resume_noirq)
- }
-};
-
/* Caller needs to take list_lock if ever used outside of cpu_pm */
static void sysc_reinit_modules(struct sysc_soc_info *soc)
{
@@ -2600,25 +2525,6 @@ out_unlock:
mutex_unlock(&sysc_soc->list_lock);
}
-/**
- * sysc_legacy_idle_quirk - handle children in omap_device compatible way
- * @ddata: device driver data
- * @child: child device driver
- *
- * Allow idle for child devices as done with _od_runtime_suspend().
- * Otherwise many child devices will not idle because of the permanent
- * parent usecount set in pm_runtime_irq_safe().
- *
- * Note that the long term solution is to just modify the child device
- * drivers to not set pm_runtime_irq_safe() and then this can be just
- * dropped.
- */
-static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
-{
- if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
- dev_pm_domain_set(child, &sysc_child_pm_domain);
-}
-
static int sysc_notifier_call(struct notifier_block *nb,
unsigned long event, void *device)
{
@@ -2635,7 +2541,6 @@ static int sysc_notifier_call(struct notifier_block *nb,
error = sysc_child_add_clocks(ddata, dev);
if (error)
return error;
- sysc_legacy_idle_quirk(ddata, dev);
break;
default:
break;
@@ -2859,8 +2764,7 @@ static const struct sysc_capabilities sysc_34xx_sr = {
.type = TI_SYSC_OMAP34XX_SR,
.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
.regbits = &sysc_regbits_omap34xx_sr,
- .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
- SYSC_QUIRK_LEGACY_IDLE,
+ .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED,
};
/*
@@ -2881,13 +2785,12 @@ static const struct sysc_capabilities sysc_36xx_sr = {
.type = TI_SYSC_OMAP36XX_SR,
.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
.regbits = &sysc_regbits_omap36xx_sr,
- .mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
+ .mod_quirks = SYSC_QUIRK_UNCACHED,
};
static const struct sysc_capabilities sysc_omap4_sr = {
.type = TI_SYSC_OMAP4_SR,
.regbits = &sysc_regbits_omap36xx_sr,
- .mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};
/*
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 89ed6cd6b059..6874b72ec59d 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -15,6 +15,8 @@
#include <linux/of_address.h>
#include <linux/device.h>
#include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <asm/cacheflush.h>
#include <asm/cacheinfo.h>
#include <asm/dma-noncoherent.h>
@@ -247,13 +249,49 @@ static irqreturn_t ccache_int_handler(int irq, void *device)
return IRQ_HANDLED;
}
+static int sifive_ccache_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long quirks;
+ int intr_num, rc;
+
+ quirks = (unsigned long)device_get_match_data(dev);
+
+ intr_num = platform_irq_count(pdev);
+ if (!intr_num)
+ return dev_err_probe(dev, -ENODEV, "No interrupts property\n");
+
+ for (int i = 0; i < intr_num; i++) {
+ if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
+ continue;
+
+ g_irq[i] = platform_get_irq(pdev, i);
+ if (g_irq[i] < 0)
+ return g_irq[i];
+
+ rc = devm_request_irq(dev, g_irq[i], ccache_int_handler, 0, "ccache_ecc", NULL);
+ if (rc)
+ return dev_err_probe(dev, rc, "Could not request IRQ %d\n", g_irq[i]);
+ }
+
+ return 0;
+}
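
The device_get_match_data() call above expects the quirk mask to be
carried in the match table's .data field; a hedged sketch of such an
entry (the compatible string and quirk choice are illustrative; the
real table is sifive_ccache_ids earlier in this file):

static const struct of_device_id my_ccache_ids[] = {
	{ .compatible = "sifive,fu740-c000-ccache",
	  .data = (void *)QUIRK_BROKEN_DATA_UNCORR },
	{ /* sentinel */ }
};
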
+
+static struct platform_driver sifive_ccache_driver = {
+ .probe = sifive_ccache_probe,
+ .driver = {
+ .name = "sifive_ccache",
+ .of_match_table = sifive_ccache_ids,
+ },
+};
+
static int __init sifive_ccache_init(void)
{
struct device_node *np;
struct resource res;
- int i, rc, intr_num;
const struct of_device_id *match;
- unsigned long quirks;
+ unsigned long quirks __maybe_unused;
+ int rc;
np = of_find_matching_node_and_match(NULL, sifive_ccache_ids, &match);
if (!np)
@@ -277,28 +315,6 @@ static int __init sifive_ccache_init(void)
goto err_unmap;
}
- intr_num = of_property_count_u32_elems(np, "interrupts");
- if (!intr_num) {
- pr_err("No interrupts property\n");
- rc = -ENODEV;
- goto err_unmap;
- }
-
- for (i = 0; i < intr_num; i++) {
- g_irq[i] = irq_of_parse_and_map(np, i);
-
- if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
- continue;
-
- rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
- NULL);
- if (rc) {
- pr_err("Could not request IRQ %d\n", g_irq[i]);
- goto err_free_irq;
- }
- }
- of_node_put(np);
-
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (quirks & QUIRK_NONSTANDARD_CACHE_OPS) {
riscv_cbom_block_size = SIFIVE_CCACHE_LINE_SIZE;
@@ -315,11 +331,15 @@ static int __init sifive_ccache_init(void)
#ifdef CONFIG_DEBUG_FS
setup_sifive_debug();
#endif
+
+ rc = platform_driver_register(&sifive_ccache_driver);
+ if (rc)
+ goto err_unmap;
+
+ of_node_put(np);
+
return 0;
-err_free_irq:
- while (--i >= 0)
- free_irq(g_irq[i], NULL);
err_unmap:
iounmap(ccache_base);
err_node_put:
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index c9bf2c219841..f0d0c044731c 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -149,7 +149,7 @@ struct agp_bridge_driver alpha_core_agp_driver = {
struct agp_bridge_data *alpha_bridge;
-int __init
+static int __init
alpha_core_agp_setup(void)
{
alpha_agp_info *agp = alpha_mv.agp_info();
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a3bbdd6e60fc..f5c71a617a99 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -382,7 +382,7 @@ static ssize_t rng_current_show(struct device *dev,
if (IS_ERR(rng))
return PTR_ERR(rng);
- ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
+ ret = sysfs_emit(buf, "%s\n", rng ? rng->name : "none");
put_rng(rng);
return ret;
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 07ec000e4cd7..94ee18a1120a 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -131,7 +131,7 @@ static void mxc_rnga_cleanup(struct hwrng *rng)
__raw_writel(ctrl & ~RNGA_CONTROL_GO, mxc_rng->mem + RNGA_CONTROL);
}
-static int __init mxc_rnga_probe(struct platform_device *pdev)
+static int mxc_rnga_probe(struct platform_device *pdev)
{
int err;
struct mxc_rng *mxc_rng;
@@ -176,7 +176,7 @@ err_ioremap:
return err;
}
-static void __exit mxc_rnga_remove(struct platform_device *pdev)
+static void mxc_rnga_remove(struct platform_device *pdev)
{
struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
@@ -197,10 +197,11 @@ static struct platform_driver mxc_rnga_driver = {
.name = "mxc_rnga",
.of_match_table = mxc_rnga_of_match,
},
- .remove_new = __exit_p(mxc_rnga_remove),
+ .probe = mxc_rnga_probe,
+ .remove_new = mxc_rnga_remove,
};
-module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe);
+module_platform_driver(mxc_rnga_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("H/W RNGA driver for i.MX");
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 379bc245c520..0e903d6e22e3 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -220,7 +220,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
if (err && i > RNG_NB_RECOVER_TRIES) {
dev_err((struct device *)priv->rng.priv,
"Couldn't recover from seed error\n");
- return -ENOTRECOVERABLE;
+ retval = -ENOTRECOVERABLE;
+ goto exit_rpm;
}
continue;
@@ -238,7 +239,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
if (err && i > RNG_NB_RECOVER_TRIES) {
dev_err((struct device *)priv->rng.priv,
"Couldn't recover from seed error");
- return -ENOTRECOVERABLE;
+ retval = -ENOTRECOVERABLE;
+ goto exit_rpm;
}
continue;
@@ -250,6 +252,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
max -= sizeof(u32);
}
+exit_rpm:
pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
@@ -353,13 +356,15 @@ static int stm32_rng_init(struct hwrng *rng)
err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg,
reg & RNG_SR_DRDY,
10, 100000);
- if (err | (reg & ~RNG_SR_DRDY)) {
+ if (err || (reg & ~RNG_SR_DRDY)) {
clk_disable_unprepare(priv->clk);
dev_err((struct device *)priv->rng.priv,
"%s: timeout:%x SR: %x!\n", __func__, err, reg);
return -EINVAL;
}
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -384,6 +389,11 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
static int __maybe_unused stm32_rng_suspend(struct device *dev)
{
struct stm32_rng_private *priv = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
if (priv->data->has_cond_reset) {
priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR);
@@ -465,6 +475,8 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
writel_relaxed(reg, priv->base + RNG_CR);
}
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 456be28ba67c..2597cb43f438 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
static void __cold _credit_init_bits(size_t bits)
{
- static struct execute_work set_ready;
+ static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- if (static_key_initialized)
- execute_in_process_context(crng_set_ready, &set_ready);
+ if (static_key_initialized && system_unbound_wq)
+ queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -890,8 +890,8 @@ void __init random_init(void)
/*
* If we were initialized by the cpu or bootloader before jump labels
- * are initialized, then we should enable the static branch here, where
- * it's guaranteed that jump labels have been initialized.
+ * or workqueues are initialized, then we should enable the static
+ * branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 927088b2c3d3..e63a6a17793c 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -27,6 +27,20 @@ menuconfig TCG_TPM
if TCG_TPM
+config TCG_TPM2_HMAC
+ bool "Use HMAC and encrypted transactions on the TPM bus"
+ default y
+ select CRYPTO_ECDH
+ select CRYPTO_LIB_AESCFB
+ select CRYPTO_LIB_SHA256
+ help
+ Setting this deploys a scheme that uses request and response HMACs,
+ in addition to encryption, when communicating with the TPM, in order
+ to prevent or detect bus snooping and interposer attacks (see
+ tpm-security.rst). Saying Y here adds some encryption overhead to
+ all kernel-to-TPM transactions.
+
config HW_RANDOM_TPM
bool "TPM HW Random Number Generator support"
depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m)
@@ -149,6 +163,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
depends on PPC64 || HAS_IOPORT_MAP
+ depends on HAS_IOPORT
help
If you have a TPM security chip from Atmel say Yes and it
will be accessible from within Linux. To compile this driver
@@ -156,7 +171,7 @@ config TCG_ATMEL
config TCG_INFINEON
tristate "Infineon Technologies TPM Interface"
- depends on PNP
+ depends on PNP || COMPILE_TEST
help
If you have a TPM security chip from Infineon Technologies
(either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 0222b1ddb310..4c695b0388f3 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -15,7 +15,9 @@ tpm-y += tpm-sysfs.o
tpm-y += eventlog/common.o
tpm-y += eventlog/tpm1.o
tpm-y += eventlog/tpm2.o
+tpm-y += tpm-buf.o
+tpm-$(CONFIG_TCG_TPM2_HMAC) += tpm2-sessions.o
tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
tpm-$(CONFIG_EFI) += eventlog/efi.o
tpm-$(CONFIG_OF) += eventlog/of.o
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index bd757d836c5c..69533d0bfb51 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -142,7 +142,6 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
log->bios_event_log_end = log->bios_event_log + len;
- ret = -EIO;
virt = acpi_os_map_iomem(start, len);
if (!virt) {
dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
new file mode 100644
index 000000000000..647c6ca92ac3
--- /dev/null
+++ b/drivers/char/tpm/tpm-buf.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handling of TPM command and other buffers.
+ */
+
+#include <linux/tpm_command.h>
+#include <linux/module.h>
+#include <linux/tpm.h>
+
+/**
+ * tpm_buf_init() - Allocate and initialize a TPM command
+ * @buf: A &tpm_buf
+ * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS
+ * @ordinal: A command ordinal
+ *
+ * Return: 0 or -ENOMEM
+ */
+int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ buf->data = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+
+ tpm_buf_reset(buf, tag, ordinal);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_init);
+
+/**
+ * tpm_buf_reset() - Initialize a TPM command
+ * @buf: A &tpm_buf
+ * @tag: TPM_TAG_RQU_COMMAND, TPM2_ST_NO_SESSIONS or TPM2_ST_SESSIONS
+ * @ordinal: A command ordinal
+ */
+void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ WARN_ON(tag != TPM_TAG_RQU_COMMAND && tag != TPM2_ST_NO_SESSIONS &&
+ tag != TPM2_ST_SESSIONS && tag != 0);
+
+ buf->flags = 0;
+ buf->length = sizeof(*head);
+ head->tag = cpu_to_be16(tag);
+ head->length = cpu_to_be32(sizeof(*head));
+ head->ordinal = cpu_to_be32(ordinal);
+ buf->handles = 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_reset);
+
+/**
+ * tpm_buf_init_sized() - Allocate and initialize a sized (TPM2B) buffer
+ * @buf: A &tpm_buf
+ *
+ * Return: 0 or -ENOMEM
+ */
+int tpm_buf_init_sized(struct tpm_buf *buf)
+{
+ buf->data = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+
+ tpm_buf_reset_sized(buf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_init_sized);
+
+/**
+ * tpm_buf_reset_sized() - Initialize a sized buffer
+ * @buf: A &tpm_buf
+ */
+void tpm_buf_reset_sized(struct tpm_buf *buf)
+{
+ buf->flags = TPM_BUF_TPM2B;
+ buf->length = 2;
+ buf->data[0] = 0;
+ buf->data[1] = 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_reset_sized);
+
+void tpm_buf_destroy(struct tpm_buf *buf)
+{
+ free_page((unsigned long)buf->data);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_destroy);
+
+/**
+ * tpm_buf_length() - Return the number of bytes consumed by the data
+ * @buf: A &tpm_buf
+ *
+ * Return: The number of bytes consumed by the buffer
+ */
+u32 tpm_buf_length(struct tpm_buf *buf)
+{
+ return buf->length;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_length);
+
+/**
+ * tpm_buf_append() - Append data to an initialized buffer
+ * @buf: A &tpm_buf
+ * @new_data: A data blob
+ * @new_length: Size of the appended data
+ */
+void tpm_buf_append(struct tpm_buf *buf, const u8 *new_data, u16 new_length)
+{
+ /* Return silently if overflow has already happened. */
+ if (buf->flags & TPM_BUF_OVERFLOW)
+ return;
+
+ if ((buf->length + new_length) > PAGE_SIZE) {
+ WARN(1, "tpm_buf: write overflow\n");
+ buf->flags |= TPM_BUF_OVERFLOW;
+ return;
+ }
+
+ memcpy(&buf->data[buf->length], new_data, new_length);
+ buf->length += new_length;
+
+ if (buf->flags & TPM_BUF_TPM2B)
+ ((__be16 *)buf->data)[0] = cpu_to_be16(buf->length - 2);
+ else
+ ((struct tpm_header *)buf->data)->length = cpu_to_be32(buf->length);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append);
+
+void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
+{
+ tpm_buf_append(buf, &value, 1);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_u8);
+
+void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
+{
+ __be16 value2 = cpu_to_be16(value);
+
+ tpm_buf_append(buf, (u8 *)&value2, 2);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_u16);
+
+void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
+{
+ __be32 value2 = cpu_to_be32(value);
+
+ tpm_buf_append(buf, (u8 *)&value2, 4);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_u32);
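
A minimal usage sketch of the append helpers, building a
TPM2_CC_GET_RANDOM command; error handling is trimmed and the function
name is hypothetical:

static int get_random_example(struct tpm_chip *chip)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;

	tpm_buf_append_u16(&buf, 8);    /* bytesRequested parameter */

	/* tpm_buf_append() keeps head->length in sync, so this is
	 * already a fully framed command. */
	rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to get random data");
	tpm_buf_destroy(&buf);
	return rc;
}
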
+
+/**
+ * tpm_buf_read() - Read from a TPM buffer
+ * @buf: &tpm_buf instance
+ * @offset: offset within the buffer
+ * @count: the number of bytes to read
+ * @output: the output buffer
+ */
+static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void *output)
+{
+ off_t next_offset;
+
+ /* Return silently if overflow has already happened. */
+ if (buf->flags & TPM_BUF_BOUNDARY_ERROR)
+ return;
+
+ next_offset = *offset + count;
+ if (next_offset > buf->length) {
+ WARN(1, "tpm_buf: read out of boundary\n");
+ buf->flags |= TPM_BUF_BOUNDARY_ERROR;
+ return;
+ }
+
+ memcpy(output, &buf->data[*offset], count);
+ *offset = next_offset;
+}
+
+/**
+ * tpm_buf_read_u8() - Read 8-bit word from a TPM buffer
+ * @buf: &tpm_buf instance
+ * @offset: offset within the buffer
+ *
+ * Return: next 8-bit word
+ */
+u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset)
+{
+ u8 value;
+
+ tpm_buf_read(buf, offset, sizeof(value), &value);
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_read_u8);
+
+/**
+ * tpm_buf_read_u16() - Read 16-bit word from a TPM buffer
+ * @buf: &tpm_buf instance
+ * @offset: offset within the buffer
+ *
+ * Return: next 16-bit word
+ */
+u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset)
+{
+ u16 value;
+
+ tpm_buf_read(buf, offset, sizeof(value), &value);
+
+ return be16_to_cpu(value);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_read_u16);
+
+/**
+ * tpm_buf_read_u32() - Read 32-bit word from a TPM buffer
+ * @buf: &tpm_buf instance
+ * @offset: offset within the buffer
+ *
+ * Return: next 32-bit word
+ */
+u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
+{
+ u32 value;
+
+ tpm_buf_read(buf, offset, sizeof(value), &value);
+
+ return be32_to_cpu(value);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_read_u32);
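
And the matching read side: because tpm_transmit_cmd() now stores the
response length back into buf->length (see the tpm-interface.c hunk
below), the cursor-based readers can bounds-check every access. A
sketch of parsing the GetRandom response above, while buf still holds
it (before tpm_buf_destroy()):

off_t offset = TPM_HEADER_SIZE;
u16 size;

size = tpm_buf_read_u16(&buf, &offset); /* TPM2B size prefix */
/* The next 'size' bytes at &buf.data[offset] are the random payload.
 * A read past buf->length flags TPM_BUF_BOUNDARY_ERROR instead of
 * overrunning the page. */
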
+
+static u16 tpm_buf_tag(struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ return be16_to_cpu(head->tag);
+}
+
+/**
+ * tpm_buf_parameters() - return the TPM response parameters area of the tpm_buf
+ * @buf: tpm_buf to use
+ *
+ * Where the parameters are located depends on the tag of a TPM
+ * command (it's immediately after the header for TPM2_ST_NO_SESSIONS
+ * or 4 bytes after for TPM2_ST_SESSIONS). Evaluate this and return a
+ * pointer to the first byte of the parameters area.
+ *
+ * Return: pointer to the parameters area
+ */
+u8 *tpm_buf_parameters(struct tpm_buf *buf)
+{
+ int offset = TPM_HEADER_SIZE;
+
+ if (tpm_buf_tag(buf) == TPM2_ST_SESSIONS)
+ offset += 4;
+
+ return &buf->data[offset];
+}
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 42b1062e33cd..854546000c92 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -158,6 +158,9 @@ int tpm_try_get_ops(struct tpm_chip *chip)
{
int rc = -EIO;
+ if (chip->flags & TPM_CHIP_FLAG_DISABLE)
+ return rc;
+
get_device(&chip->dev);
down_read(&chip->ops_sem);
@@ -275,6 +278,9 @@ static void tpm_dev_release(struct device *dev)
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
kfree(chip->allocated_banks);
+#ifdef CONFIG_TCG_TPM2_HMAC
+ kfree(chip->auth);
+#endif
kfree(chip);
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 757336324c90..5da134f12c9a 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -232,6 +232,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
if (len < min_rsp_body_length + TPM_HEADER_SIZE)
return -EFAULT;
+ buf->length = len;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_transmit_cmd);
@@ -342,31 +343,6 @@ out:
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
-/**
- * tpm_send - send a TPM command
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- * @cmd: a TPM command buffer
- * @buflen: the length of the TPM command buffer
- *
- * Return: same as with tpm_transmit_cmd()
- */
-int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
-{
- struct tpm_buf buf;
- int rc;
-
- chip = tpm_find_get_ops(chip);
- if (!chip)
- return -ENODEV;
-
- buf.data = cmd;
- rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to a send a command");
-
- tpm_put_ops(chip);
- return rc;
-}
-EXPORT_SYMBOL_GPL(tpm_send);
-
int tpm_auto_startup(struct tpm_chip *chip)
{
int rc;
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 54c71473aa29..94231f052ea7 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -309,6 +309,21 @@ static ssize_t tpm_version_major_show(struct device *dev,
}
static DEVICE_ATTR_RO(tpm_version_major);
+#ifdef CONFIG_TCG_TPM2_HMAC
+static ssize_t null_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = to_tpm_chip(dev);
+ int size = TPM2_NAME_SIZE;
+
+ bin2hex(buf, chip->null_key_name, size);
+ size *= 2;
+ buf[size++] = '\n';
+ return size;
+}
+static DEVICE_ATTR_RO(null_name);
+#endif
+
static struct attribute *tpm1_dev_attrs[] = {
&dev_attr_pubek.attr,
&dev_attr_pcrs.attr,
@@ -326,6 +341,9 @@ static struct attribute *tpm1_dev_attrs[] = {
static struct attribute *tpm2_dev_attrs[] = {
&dev_attr_tpm_version_major.attr,
+#ifdef CONFIG_TCG_TPM2_HMAC
+ &dev_attr_null_name.attr,
+#endif
NULL
};
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 61445f1dc46d..6b8b9956ba69 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -312,9 +312,23 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
size_t *bufsiz);
int tpm_devs_add(struct tpm_chip *chip);
void tpm_devs_remove(struct tpm_chip *chip);
+int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
+ unsigned int buf_size, unsigned int *offset);
+int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
+ unsigned int *offset, u32 *handle);
void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
int tpm_dev_common_init(void);
void tpm_dev_common_exit(void);
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+int tpm2_sessions_init(struct tpm_chip *chip);
+#else
+static inline int tpm2_sessions_init(struct tpm_chip *chip)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 93545be190a5..0cdf892ec2a7 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -216,13 +216,6 @@ out:
return rc;
}
-struct tpm2_null_auth_area {
- __be32 handle;
- __be16 nonce_size;
- u8 attributes;
- __be16 auth_size;
-} __packed;
-
/**
* tpm2_pcr_extend() - extend a PCR value
*
@@ -236,24 +229,22 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests)
{
struct tpm_buf buf;
- struct tpm2_null_auth_area auth_area;
int rc;
int i;
- rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+ rc = tpm2_start_auth_session(chip);
if (rc)
return rc;
- tpm_buf_append_u32(&buf, pcr_idx);
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+ if (rc) {
+ tpm2_end_auth_session(chip);
+ return rc;
+ }
- auth_area.handle = cpu_to_be32(TPM2_RS_PW);
- auth_area.nonce_size = 0;
- auth_area.attributes = 0;
- auth_area.auth_size = 0;
+ tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
- tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area));
- tpm_buf_append(&buf, (const unsigned char *)&auth_area,
- sizeof(auth_area));
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
for (i = 0; i < chip->nr_allocated_banks; i++) {
@@ -262,7 +253,9 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
+ tpm_buf_fill_hmac_session(chip, &buf);
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
+ rc = tpm_buf_check_hmac_response(chip, &buf, rc);
tpm_buf_destroy(&buf);
@@ -299,25 +292,35 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
if (!num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- err = tpm_buf_init(&buf, 0, 0);
+ err = tpm2_start_auth_session(chip);
if (err)
return err;
+ err = tpm_buf_init(&buf, 0, 0);
+ if (err) {
+ tpm2_end_auth_session(chip);
+ return err;
+ }
+
do {
- tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
+ tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
+ tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT
+ | TPM2_SA_CONTINUE_SESSION,
+ NULL, 0);
tpm_buf_append_u16(&buf, num_bytes);
+ tpm_buf_fill_hmac_session(chip, &buf);
err = tpm_transmit_cmd(chip, &buf,
offsetof(struct tpm2_get_random_out,
buffer),
"attempting get random");
+ err = tpm_buf_check_hmac_response(chip, &buf, err);
if (err) {
if (err > 0)
err = -EIO;
goto out;
}
- out = (struct tpm2_get_random_out *)
- &buf.data[TPM_HEADER_SIZE];
+ out = (struct tpm2_get_random_out *)tpm_buf_parameters(&buf);
recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
if (tpm_buf_length(&buf) <
TPM_HEADER_SIZE +
@@ -334,9 +337,12 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
} while (retries-- && total < max);
tpm_buf_destroy(&buf);
+ tpm2_end_auth_session(chip);
+
return total ? total : -EIO;
out:
tpm_buf_destroy(&buf);
+ tpm2_end_auth_session(chip);
return err;
}
@@ -759,6 +765,11 @@ int tpm2_auto_startup(struct tpm_chip *chip)
rc = 0;
}
+ if (rc)
+ goto out;
+
+ rc = tpm2_sessions_init(chip);
+
out:
/*
* Infineon TPM in field upgrade mode will return no data for the number
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
new file mode 100644
index 000000000000..ea8860661876
--- /dev/null
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -0,0 +1,1286 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2018 James.Bottomley@HansenPartnership.com
+ *
+ * Cryptographic helper routines for handling TPM2 sessions for
+ * authorization HMAC and request response encryption.
+ *
+ * The idea is to ensure that every TPM command is HMAC protected by a
+ * session, meaning in-flight tampering would be detected and in
+ * addition all sensitive inputs and responses should be encrypted.
+ *
+ * The basic way this works is to use a TPM feature called salted
+ * sessions where a random secret used in session construction is
+ * encrypted to the public part of a known TPM key. The problem is we
+ * have no known keys, so initially a primary Elliptic Curve key is
+ * derived from the NULL seed (we use EC because most TPMs generate
+ * these keys much faster than RSA ones). The curve used is NIST_P256
+ * because that's now mandated to be present in the 'TCG TPM v2.0
+ * Provisioning Guidance'.
+ *
+ * Threat model: the initial TPM2_CreatePrimary is not (and cannot
+ * be) session protected, so a clever Man in the Middle could return a
+ * public key they control to this command and from there intercept
+ * and decode all subsequent session based transactions. The kernel
+ * cannot mitigate this threat but, after boot, userspace can get
+ * proof this has not happened by asking the TPM to certify the NULL
+ * key. This certification would chain back to the TPM Endorsement
+ * Certificate and prove the NULL seed primary had not been tampered
+ * with and thus all sessions must have been cryptographically secure.
+ * To assist with this, the initial NULL seed public key name is made
+ * available in a sysfs file.
+ *
+ * Use of these functions:
+ *
+ * The design is that all the crypto, hash and HMAC gunk is confined in
+ * this file and never needs to be seen even by the kernel-internal user.
+ * To the user there's an init function tpm2_sessions_init() that needs to
+ * be called once per TPM which generates the NULL seed primary key.
+ *
+ * These are the usage functions:
+ *
+ * tpm2_start_auth_session() which allocates the opaque auth structure
+ * and gets a session from the TPM. This must be called before
+ * any of the following functions. The session is protected by a
+ * session_key which is derived from a random salt value
+ * encrypted to the NULL seed.
+ * tpm2_end_auth_session() kills the session and frees the resources.
+ *     Under normal operation this is done by
+ * tpm_buf_check_hmac_response(), so this is only to be used on
+ * error legs where the latter is not executed.
+ * tpm_buf_append_name() to add a handle to the buffer. This must be
+ * used in place of the usual tpm_buf_append_u32() for adding
+ * handles because handles have to be processed specially when
+ * calculating the HMAC. In particular, for NV, volatile and
+ * permanent objects you now need to provide the name.
+ * tpm_buf_append_hmac_session() which appends the hmac session to the
+ *     buf in the same way tpm_buf_append_auth() does.
+ * tpm_buf_fill_hmac_session() This calculates the correct hash and
+ * places it in the buffer. It must be called after the complete
+ * command buffer is finalized so it can fill in the correct HMAC
+ * based on the parameters.
+ * tpm_buf_check_hmac_response() which checks the session response in
+ * the buffer and calculates what it should be. If there's a
+ * mismatch it will log a warning and return an error. If
+ * tpm_buf_append_hmac_session() did not specify
+ *     TPM2_SA_CONTINUE_SESSION then the session will be closed (if it
+ * hasn't been consumed) and the auth structure freed.
+ */
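+
+/*
+ * Putting the above together, a protected command roughly follows this
+ * shape (a sketch; tpm2_pcr_extend() is a real caller of the pattern):
+ *
+ * rc = tpm2_start_auth_session(chip);
+ * rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, cc);
+ * tpm_buf_append_name(chip, &buf, handle, NULL);
+ * tpm_buf_append_hmac_session(chip, &buf, attributes, NULL, 0);
+ * ... append the command parameters ...
+ * tpm_buf_fill_hmac_session(chip, &buf);
+ * rc = tpm_transmit_cmd(chip, &buf, 0, desc);
+ * rc = tpm_buf_check_hmac_response(chip, &buf, rc);
+ */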
+
+#include "tpm.h"
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <crypto/kpp.h>
+#include <crypto/ecdh.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+
+/* maximum number of names the TPM must remember for authorization */
+#define AUTH_MAX_NAMES 3
+
+static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
+ u32 *handle, u8 *name);
+
+/*
+ * This is the structure that carries all the auth information (like
+ * session handle, nonces, session key and auth) from use to use it is
+ * designed to be opaque to anything outside.
+ */
+struct tpm2_auth {
+ u32 handle;
+ /*
+ * This has two meanings: before tpm_buf_fill_hmac_session()
+ * it marks the offset in the buffer of the start of the
+ * sessions (i.e. after all the handles). Once the buffer has
+	 * been filled it marks the session number of our auth
+ * session so we can find it again in the response buffer.
+ *
+ * The two cases are distinguished because the first offset
+ * must always be greater than TPM_HEADER_SIZE and the second
+ * must be less than or equal to 5.
+ */
+ u32 session;
+ /*
+ * the size here is variable and set by the size of our_nonce
+	 * which must be between 16 and the name hash length. We set
+	 * the maximum sha256 size for the greatest protection.
+ */
+ u8 our_nonce[SHA256_DIGEST_SIZE];
+ u8 tpm_nonce[SHA256_DIGEST_SIZE];
+ /*
+	 * the salt is only used across the session command/response;
+	 * after that it can be used as a scratch area.
+ */
+ union {
+ u8 salt[EC_PT_SZ];
+ /* scratch for key + IV */
+ u8 scratch[AES_KEY_BYTES + AES_BLOCK_SIZE];
+ };
+ /*
+ * the session key and passphrase are the same size as the
+ * name digest (sha256 again). The session key is constant
+ * for the use of the session and the passphrase can change
+ * with every invocation.
+ *
+ * Note: these fields must be adjacent and in this order
+ * because several HMAC/KDF schemes use the combination of the
+ * session_key and passphrase.
+ */
+ u8 session_key[SHA256_DIGEST_SIZE];
+ u8 passphrase[SHA256_DIGEST_SIZE];
+ int passphrase_len;
+ struct crypto_aes_ctx aes_ctx;
+ /* saved session attributes: */
+ u8 attrs;
+ __be32 ordinal;
+
+ /*
+ * memory for three authorization handles. We know them by
+ * handle, but they are part of the session by name, which
+ * we must compute and remember
+ */
+ u32 name_h[AUTH_MAX_NAMES];
+ u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE];
+};
+
+/*
+ * Name Size based on TPM algorithm (assumes no hash bigger than 255)
+ */
+static u8 name_size(const u8 *name)
+{
+ static u8 size_map[] = {
+ [TPM_ALG_SHA1] = SHA1_DIGEST_SIZE,
+ [TPM_ALG_SHA256] = SHA256_DIGEST_SIZE,
+ [TPM_ALG_SHA384] = SHA384_DIGEST_SIZE,
+ [TPM_ALG_SHA512] = SHA512_DIGEST_SIZE,
+ };
+ u16 alg = get_unaligned_be16(name);
+
+	return size_map[alg] + 2;
+}
+
+/*
+ * It turns out the crypto hmac(sha256) is hard for us to consume
+ * because it assumes a fixed key and the TPM seems to change the key
+ * on every operation, so we weld the hmac init and final functions in
+ * here to give it the same usage characteristics as a regular hash
+ */
+static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len)
+{
+ u8 pad[SHA256_BLOCK_SIZE];
+ int i;
+
+ sha256_init(sctx);
+ for (i = 0; i < sizeof(pad); i++) {
+ if (i < key_len)
+ pad[i] = key[i];
+ else
+ pad[i] = 0;
+ pad[i] ^= HMAC_IPAD_VALUE;
+ }
+ sha256_update(sctx, pad, sizeof(pad));
+}
+
+static void tpm2_hmac_final(struct sha256_state *sctx, u8 *key, u32 key_len,
+ u8 *out)
+{
+ u8 pad[SHA256_BLOCK_SIZE];
+ int i;
+
+ for (i = 0; i < sizeof(pad); i++) {
+ if (i < key_len)
+ pad[i] = key[i];
+ else
+ pad[i] = 0;
+ pad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ /* collect the final hash; use out as temporary storage */
+ sha256_final(sctx, out);
+
+ sha256_init(sctx);
+ sha256_update(sctx, pad, sizeof(pad));
+ sha256_update(sctx, out, SHA256_DIGEST_SIZE);
+ sha256_final(sctx, out);
+}
+
+/*
+ * assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but
+ * otherwise standard tpm2_KDFa. Note output is in bytes not bits.
+ */
+static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
+ u8 *v, u32 bytes, u8 *out)
+{
+ u32 counter = 1;
+ const __be32 bits = cpu_to_be32(bytes * 8);
+
+ while (bytes > 0) {
+ struct sha256_state sctx;
+ __be32 c = cpu_to_be32(counter);
+
+ tpm2_hmac_init(&sctx, key, key_len);
+ sha256_update(&sctx, (u8 *)&c, sizeof(c));
+ sha256_update(&sctx, label, strlen(label)+1);
+ sha256_update(&sctx, u, SHA256_DIGEST_SIZE);
+ sha256_update(&sctx, v, SHA256_DIGEST_SIZE);
+ sha256_update(&sctx, (u8 *)&bits, sizeof(bits));
+ tpm2_hmac_final(&sctx, key, key_len, out);
+
+ bytes -= SHA256_DIGEST_SIZE;
+ counter++;
+ out += SHA256_DIGEST_SIZE;
+ }
+}
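+
+/*
+ * For instance, tpm_buf_fill_hmac_session() below derives the AES-CFB
+ * key and IV for parameter encryption in a single call:
+ *
+ * tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE + auth->passphrase_len,
+ *           "CFB", auth->our_nonce, auth->tpm_nonce,
+ *           AES_KEY_BYTES + AES_BLOCK_SIZE, auth->scratch);
+ */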
+
+/*
+ * Somewhat of a bastardization of the real KDFe. We're assuming
+ * we're working with known point sizes for the input parameters and
+ * the hash algorithm is fixed at sha256. Because we know that the
+ * point size is 32 bytes like the hash size, there's no need to loop
+ * in this KDF.
+ */
+static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
+ u8 *out)
+{
+ struct sha256_state sctx;
+ /*
+ * this should be an iterative counter, but because we know
+ * we're only taking 32 bytes for the point using a sha256
+ * hash which is also 32 bytes, there's only one loop
+ */
+ __be32 c = cpu_to_be32(1);
+
+ sha256_init(&sctx);
+ /* counter (BE) */
+ sha256_update(&sctx, (u8 *)&c, sizeof(c));
+ /* secret value */
+ sha256_update(&sctx, z, EC_PT_SZ);
+ /* string including trailing zero */
+ sha256_update(&sctx, str, strlen(str)+1);
+ sha256_update(&sctx, pt_u, EC_PT_SZ);
+ sha256_update(&sctx, pt_v, EC_PT_SZ);
+ sha256_final(&sctx, out);
+}
+
+static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
+{
+ struct crypto_kpp *kpp;
+ struct kpp_request *req;
+ struct scatterlist s[2], d[1];
+ struct ecdh p = {0};
+ u8 encoded_key[EC_PT_SZ], *x, *y;
+ unsigned int buf_len;
+
+	/* the encrypted secret holds two size-prefixed EC points */
+ tpm_buf_append_u16(buf, (EC_PT_SZ + 2)*2);
+ /*
+ * we cheat here and append uninitialized data to form
+ * the points. All we care about is getting the two
+ * co-ordinate pointers, which will be used to overwrite
+ * the uninitialized data
+ */
+ tpm_buf_append_u16(buf, EC_PT_SZ);
+ x = &buf->data[tpm_buf_length(buf)];
+ tpm_buf_append(buf, encoded_key, EC_PT_SZ);
+ tpm_buf_append_u16(buf, EC_PT_SZ);
+ y = &buf->data[tpm_buf_length(buf)];
+ tpm_buf_append(buf, encoded_key, EC_PT_SZ);
+ sg_init_table(s, 2);
+ sg_set_buf(&s[0], x, EC_PT_SZ);
+ sg_set_buf(&s[1], y, EC_PT_SZ);
+
+ kpp = crypto_alloc_kpp("ecdh-nist-p256", CRYPTO_ALG_INTERNAL, 0);
+ if (IS_ERR(kpp)) {
+ dev_err(&chip->dev, "crypto ecdh allocation failed\n");
+ return;
+ }
+
+ buf_len = crypto_ecdh_key_len(&p);
+ if (sizeof(encoded_key) < buf_len) {
+		dev_err(&chip->dev, "salt buffer too small, needs %d\n",
+ buf_len);
+ goto out;
+ }
+ crypto_ecdh_encode_key(encoded_key, buf_len, &p);
+ /* this generates a random private key */
+ crypto_kpp_set_secret(kpp, encoded_key, buf_len);
+
+ /* salt is now the public point of this private key */
+ req = kpp_request_alloc(kpp, GFP_KERNEL);
+ if (!req)
+ goto out;
+ kpp_request_set_input(req, NULL, 0);
+ kpp_request_set_output(req, s, EC_PT_SZ*2);
+ crypto_kpp_generate_public_key(req);
+ /*
+ * we're not done: now we have to compute the shared secret
+ * which is our private key multiplied by the tpm_key public
+ * point, we actually only take the x point and discard the y
+ * point and feed it through KDFe to get the final secret salt
+ */
+ sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ);
+ sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ);
+ kpp_request_set_input(req, s, EC_PT_SZ*2);
+ sg_init_one(d, chip->auth->salt, EC_PT_SZ);
+ kpp_request_set_output(req, d, EC_PT_SZ);
+ crypto_kpp_compute_shared_secret(req);
+ kpp_request_free(req);
+
+ /*
+ * pass the shared secret through KDFe for salt. Note salt
+ * area is used both for input shared secret and output salt.
+ * This works because KDFe fully consumes the secret before it
+ * writes the salt
+ */
+ tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x,
+ chip->auth->salt);
+
+ out:
+ crypto_free_kpp(kpp);
+}
+
+/**
+ * tpm_buf_append_hmac_session() - Append a TPM session element
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @attributes: The session attributes
+ * @passphrase: The session authority (NULL if none)
+ * @passphrase_len: The length of the session authority (0 if none)
+ *
+ * This fills in a session structure in the TPM command buffer, except
+ * for the HMAC which cannot be computed until the command buffer is
+ * complete. The type of session is controlled by the @attributes,
+ * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
+ * session won't terminate after tpm_buf_check_hmac_response(),
+ * TPM2_SA_DECRYPT which means this buffer's first parameter should be
+ * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
+ * response buffer's first parameter needs to be decrypted (confusing,
+ * but the defines are written from the point of view of the TPM).
+ *
+ * Any session appended by this command must be finalized by calling
+ * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
+ * and the TPM will reject the command.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase,
+ int passphrase_len)
+{
+ u8 nonce[SHA256_DIGEST_SIZE];
+ u32 len;
+ struct tpm2_auth *auth = chip->auth;
+
+ /*
+ * The Architecture Guide requires us to strip trailing zeros
+ * before computing the HMAC
+ */
+ while (passphrase && passphrase_len > 0
+ && passphrase[passphrase_len - 1] == '\0')
+ passphrase_len--;
+
+ auth->attrs = attributes;
+ auth->passphrase_len = passphrase_len;
+ if (passphrase_len)
+ memcpy(auth->passphrase, passphrase, passphrase_len);
+
+ if (auth->session != tpm_buf_length(buf)) {
+ /* we're not the first session */
+ len = get_unaligned_be32(&buf->data[auth->session]);
+ if (4 + len + auth->session != tpm_buf_length(buf)) {
+ WARN(1, "session length mismatch, cannot append");
+ return;
+ }
+
+ /* add our new session */
+ len += 9 + 2 * SHA256_DIGEST_SIZE;
+ put_unaligned_be32(len, &buf->data[auth->session]);
+ } else {
+ tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
+ }
+
+ /* random number for our nonce */
+ get_random_bytes(nonce, sizeof(nonce));
+ memcpy(auth->our_nonce, nonce, sizeof(nonce));
+ tpm_buf_append_u32(buf, auth->handle);
+ /* our new nonce */
+ tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+ tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+ tpm_buf_append_u8(buf, auth->attrs);
+ /* and put a placeholder for the hmac */
+ tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+ tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+}
+EXPORT_SYMBOL(tpm_buf_append_hmac_session);
+
+/**
+ * tpm_buf_fill_hmac_session() - finalize the session HMAC
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ *
+ * This command must not be called until all of the parameters have
+ * been appended to @buf otherwise the computed HMAC will be
+ * incorrect.
+ *
+ * This function computes and fills in the session HMAC using the
+ * session key and, if TPM2_SA_DECRYPT was specified, computes the
+ * encryption key and encrypts the first parameter of the command
+ * buffer with it.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
+{
+ u32 cc, handles, val;
+ struct tpm2_auth *auth = chip->auth;
+ int i;
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ off_t offset_s = TPM_HEADER_SIZE, offset_p;
+ u8 *hmac = NULL;
+ u32 attrs;
+ u8 cphash[SHA256_DIGEST_SIZE];
+ struct sha256_state sctx;
+
+ /* save the command code in BE format */
+ auth->ordinal = head->ordinal;
+
+ cc = be32_to_cpu(head->ordinal);
+
+ i = tpm2_find_cc(chip, cc);
+ if (i < 0) {
+ dev_err(&chip->dev, "Command 0x%x not found in TPM\n", cc);
+ return;
+ }
+ attrs = chip->cc_attrs_tbl[i];
+
+ handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
+
+ /*
+	 * Just check the names; it's easy to make mistakes. This
+ * would happen if someone added a handle via
+ * tpm_buf_append_u32() instead of tpm_buf_append_name()
+ */
+ for (i = 0; i < handles; i++) {
+ u32 handle = tpm_buf_read_u32(buf, &offset_s);
+
+ if (auth->name_h[i] != handle) {
+ dev_err(&chip->dev, "TPM: handle %d wrong for name\n",
+ i);
+ return;
+ }
+ }
+ /* point offset_s to the start of the sessions */
+ val = tpm_buf_read_u32(buf, &offset_s);
+ /* point offset_p to the start of the parameters */
+ offset_p = offset_s + val;
+ for (i = 1; offset_s < offset_p; i++) {
+ u32 handle = tpm_buf_read_u32(buf, &offset_s);
+ u16 len;
+ u8 a;
+
+ /* nonce (already in auth) */
+ len = tpm_buf_read_u16(buf, &offset_s);
+ offset_s += len;
+
+ a = tpm_buf_read_u8(buf, &offset_s);
+
+ len = tpm_buf_read_u16(buf, &offset_s);
+ if (handle == auth->handle && auth->attrs == a) {
+ hmac = &buf->data[offset_s];
+ /*
+ * save our session number so we know which
+ * session in the response belongs to us
+ */
+ auth->session = i;
+ }
+
+ offset_s += len;
+ }
+ if (offset_s != offset_p) {
+ dev_err(&chip->dev, "TPM session length is incorrect\n");
+ return;
+ }
+ if (!hmac) {
+ dev_err(&chip->dev, "TPM could not find HMAC session\n");
+ return;
+ }
+
+ /* encrypt before HMAC */
+ if (auth->attrs & TPM2_SA_DECRYPT) {
+ u16 len;
+
+ /* need key and IV */
+ tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE
+ + auth->passphrase_len, "CFB", auth->our_nonce,
+ auth->tpm_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE,
+ auth->scratch);
+
+ len = tpm_buf_read_u16(buf, &offset_p);
+ aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES);
+ aescfb_encrypt(&auth->aes_ctx, &buf->data[offset_p],
+ &buf->data[offset_p], len,
+ auth->scratch + AES_KEY_BYTES);
+ /* reset p to beginning of parameters for HMAC */
+ offset_p -= 2;
+ }
+
+ sha256_init(&sctx);
+ /* ordinal is already BE */
+ sha256_update(&sctx, (u8 *)&head->ordinal, sizeof(head->ordinal));
+ /* add the handle names */
+ for (i = 0; i < handles; i++) {
+ enum tpm2_mso_type mso = tpm2_handle_mso(auth->name_h[i]);
+
+ if (mso == TPM2_MSO_PERSISTENT ||
+ mso == TPM2_MSO_VOLATILE ||
+ mso == TPM2_MSO_NVRAM) {
+ sha256_update(&sctx, auth->name[i],
+ name_size(auth->name[i]));
+ } else {
+ __be32 h = cpu_to_be32(auth->name_h[i]);
+
+ sha256_update(&sctx, (u8 *)&h, 4);
+ }
+ }
+ if (offset_s != tpm_buf_length(buf))
+ sha256_update(&sctx, &buf->data[offset_s],
+ tpm_buf_length(buf) - offset_s);
+ sha256_final(&sctx, cphash);
+
+ /* now calculate the hmac */
+ tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
+ + auth->passphrase_len);
+ sha256_update(&sctx, cphash, sizeof(cphash));
+ sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
+ sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ sha256_update(&sctx, &auth->attrs, 1);
+ tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
+ + auth->passphrase_len, hmac);
+}
+EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
+
+static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ off_t offset = TPM_HEADER_SIZE;
+ u32 tot_len = be32_to_cpu(head->length);
+ u32 val;
+
+ /* we're starting after the header so adjust the length */
+ tot_len -= TPM_HEADER_SIZE;
+
+ /* skip public */
+ val = tpm_buf_read_u16(buf, &offset);
+ if (val > tot_len)
+ return -EINVAL;
+ offset += val;
+ /* name */
+ val = tpm_buf_read_u16(buf, &offset);
+ if (val != name_size(&buf->data[offset]))
+ return -EINVAL;
+ memcpy(name, &buf->data[offset], val);
+ /* forget the rest */
+ return 0;
+}
+
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, handle);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
+ if (rc == TPM2_RC_SUCCESS)
+ rc = tpm2_parse_read_public(name, &buf);
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+/**
+ * tpm_buf_append_name() - add a handle area to the buffer
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @handle: The handle to be appended
+ * @name: The name of the handle (may be NULL)
+ *
+ * In order to compute session HMACs, we need to know the names of the
+ * objects pointed to by the handles. For most objects, this is simply
+ * the actual 4 byte handle or an empty buf (in these cases @name
+ * should be NULL) but for volatile objects, permanent objects and NV
+ * areas, the name is defined as the hash (according to the name
+ * algorithm which should be set to sha256) of the public area to
+ * which the two byte algorithm id has been appended. For these
+ * objects, the @name pointer should point to this. If a name is
+ * required but @name is NULL, then TPM2_ReadPublic() will be called
+ * on the handle to obtain the name.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name)
+{
+ enum tpm2_mso_type mso = tpm2_handle_mso(handle);
+ struct tpm2_auth *auth = chip->auth;
+ int slot;
+
+ slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE)/4;
+ if (slot >= AUTH_MAX_NAMES) {
+ dev_err(&chip->dev, "TPM: too many handles\n");
+ return;
+ }
+ WARN(auth->session != tpm_buf_length(buf),
+ "name added in wrong place\n");
+ tpm_buf_append_u32(buf, handle);
+ auth->session += 4;
+
+ if (mso == TPM2_MSO_PERSISTENT ||
+ mso == TPM2_MSO_VOLATILE ||
+ mso == TPM2_MSO_NVRAM) {
+ if (!name)
+ tpm2_read_public(chip, handle, auth->name[slot]);
+ } else {
+ if (name)
+ dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+ }
+
+ auth->name_h[slot] = handle;
+ if (name)
+ memcpy(auth->name[slot], name, name_size(name));
+}
+EXPORT_SYMBOL(tpm_buf_append_name);
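+
+/*
+ * For a handle that needs no name, such as a PCR index, the call is
+ * simply (as in tpm2_pcr_extend()):
+ *
+ * tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ */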
+
+/**
+ * tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness
+ * @chip: the TPM chip structure
+ * @buf: the original command buffer (which now contains the response)
+ * @rc: the return code from tpm_transmit_cmd
+ *
+ * If @rc is non-zero, @buf may not contain an actual return, so @rc
+ * is passed through as the return and the session cleaned up and
+ * de-allocated if required (this is required if
+ * TPM2_SA_CONTINUE_SESSION was not specified as a session flag).
+ *
+ * If @rc is zero, the response HMAC is computed against the returned
+ * @buf and matched to the TPM one in the session area. If there is a
+ * mismatch, an error is logged and -EINVAL returned.
+ *
+ * The reason for this is that the command issue and HMAC check
+ * sequence should look like:
+ *
+ * rc = tpm_transmit_cmd(...);
+ * rc = tpm_buf_check_hmac_response(&buf, auth, rc);
+ * if (rc)
+ * ...
+ *
+ * Which is easily layered into the current control flow.
+ *
+ * Returns: 0 on success or an error.
+ */
+int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
+ int rc)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ struct tpm2_auth *auth = chip->auth;
+ off_t offset_s, offset_p;
+ u8 rphash[SHA256_DIGEST_SIZE];
+ u32 attrs;
+ struct sha256_state sctx;
+ u16 tag = be16_to_cpu(head->tag);
+ u32 cc = be32_to_cpu(auth->ordinal);
+ int parm_len, len, i, handles;
+
+ if (auth->session >= TPM_HEADER_SIZE) {
+ WARN(1, "tpm session not filled correctly\n");
+ goto out;
+ }
+
+ if (rc != 0)
+		/* pass non-success rc through and close the session */
+ goto out;
+
+ rc = -EINVAL;
+ if (tag != TPM2_ST_SESSIONS) {
+ dev_err(&chip->dev, "TPM: HMAC response check has no sessions tag\n");
+ goto out;
+ }
+
+ i = tpm2_find_cc(chip, cc);
+ if (i < 0)
+ goto out;
+ attrs = chip->cc_attrs_tbl[i];
+ handles = (attrs >> TPM2_CC_ATTR_RHANDLE) & 1;
+
+ /* point to area beyond handles */
+ offset_s = TPM_HEADER_SIZE + handles * 4;
+ parm_len = tpm_buf_read_u32(buf, &offset_s);
+ offset_p = offset_s;
+ offset_s += parm_len;
+ /* skip over any sessions before ours */
+ for (i = 0; i < auth->session - 1; i++) {
+ len = tpm_buf_read_u16(buf, &offset_s);
+ offset_s += len + 1;
+ len = tpm_buf_read_u16(buf, &offset_s);
+ offset_s += len;
+ }
+ /* TPM nonce */
+ len = tpm_buf_read_u16(buf, &offset_s);
+ if (offset_s + len > tpm_buf_length(buf))
+ goto out;
+ if (len != SHA256_DIGEST_SIZE)
+ goto out;
+ memcpy(auth->tpm_nonce, &buf->data[offset_s], len);
+ offset_s += len;
+ attrs = tpm_buf_read_u8(buf, &offset_s);
+ len = tpm_buf_read_u16(buf, &offset_s);
+ if (offset_s + len != tpm_buf_length(buf))
+ goto out;
+ if (len != SHA256_DIGEST_SIZE)
+ goto out;
+ /*
+ * offset_s points to the HMAC. now calculate comparison, beginning
+ * with rphash
+ */
+ sha256_init(&sctx);
+ /* yes, I know this is now zero, but it's what the standard says */
+ sha256_update(&sctx, (u8 *)&head->return_code,
+ sizeof(head->return_code));
+ /* ordinal is already BE */
+ sha256_update(&sctx, (u8 *)&auth->ordinal, sizeof(auth->ordinal));
+ sha256_update(&sctx, &buf->data[offset_p], parm_len);
+ sha256_final(&sctx, rphash);
+
+ /* now calculate the hmac */
+ tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
+ + auth->passphrase_len);
+ sha256_update(&sctx, rphash, sizeof(rphash));
+ sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
+ sha256_update(&sctx, &auth->attrs, 1);
+ /* we're done with the rphash, so put our idea of the hmac there */
+ tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
+ + auth->passphrase_len, rphash);
+ if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) {
+ rc = 0;
+ } else {
+ dev_err(&chip->dev, "TPM: HMAC check failed\n");
+ goto out;
+ }
+
+ /* now do response decryption */
+ if (auth->attrs & TPM2_SA_ENCRYPT) {
+ /* need key and IV */
+ tpm2_KDFa(auth->session_key, SHA256_DIGEST_SIZE
+ + auth->passphrase_len, "CFB", auth->tpm_nonce,
+ auth->our_nonce, AES_KEY_BYTES + AES_BLOCK_SIZE,
+ auth->scratch);
+
+ len = tpm_buf_read_u16(buf, &offset_p);
+ aes_expandkey(&auth->aes_ctx, auth->scratch, AES_KEY_BYTES);
+ aescfb_decrypt(&auth->aes_ctx, &buf->data[offset_p],
+ &buf->data[offset_p], len,
+ auth->scratch + AES_KEY_BYTES);
+ }
+
+ out:
+ if ((auth->attrs & TPM2_SA_CONTINUE_SESSION) == 0) {
+ if (rc)
+ /* manually close the session if it wasn't consumed */
+ tpm2_flush_context(chip, auth->handle);
+ memzero_explicit(auth, sizeof(*auth));
+ } else {
+ /* reset for next use */
+ auth->session = TPM_HEADER_SIZE;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(tpm_buf_check_hmac_response);
+
+/**
+ * tpm2_end_auth_session() - kill the allocated auth session
+ * @chip: the TPM chip structure
+ *
+ * Ends the session started by tpm2_start_auth_session() and frees all
+ * the resources. Under normal conditions,
+ * tpm_buf_check_hmac_response() will correctly end the session if
+ * required, so this function is only for use in error legs that will
+ * bypass the normal invocation of tpm_buf_check_hmac_response().
+ */
+void tpm2_end_auth_session(struct tpm_chip *chip)
+{
+ tpm2_flush_context(chip, chip->auth->handle);
+ memzero_explicit(chip->auth, sizeof(*chip->auth));
+}
+EXPORT_SYMBOL(tpm2_end_auth_session);
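+
+/*
+ * A typical error leg looks like (a sketch of the pattern used by
+ * tpm2_pcr_extend()):
+ *
+ * rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, cc);
+ * if (rc) {
+ *         tpm2_end_auth_session(chip);
+ *         return rc;
+ * }
+ */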
+
+static int tpm2_parse_start_auth_session(struct tpm2_auth *auth,
+ struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ u32 tot_len = be32_to_cpu(head->length);
+ off_t offset = TPM_HEADER_SIZE;
+ u32 val;
+
+ /* we're starting after the header so adjust the length */
+ tot_len -= TPM_HEADER_SIZE;
+
+ /* should have handle plus nonce */
+ if (tot_len != 4 + 2 + sizeof(auth->tpm_nonce))
+ return -EINVAL;
+
+ auth->handle = tpm_buf_read_u32(buf, &offset);
+ val = tpm_buf_read_u16(buf, &offset);
+ if (val != sizeof(auth->tpm_nonce))
+ return -EINVAL;
+ memcpy(auth->tpm_nonce, &buf->data[offset], sizeof(auth->tpm_nonce));
+ /* now compute the session key from the nonces */
+ tpm2_KDFa(auth->salt, sizeof(auth->salt), "ATH", auth->tpm_nonce,
+ auth->our_nonce, sizeof(auth->session_key),
+ auth->session_key);
+
+ return 0;
+}
+
+static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
+{
+ int rc;
+ unsigned int offset = 0; /* dummy offset for null seed context */
+ u8 name[SHA256_DIGEST_SIZE + 2];
+
+ rc = tpm2_load_context(chip, chip->null_key_context, &offset,
+ null_key);
+ if (rc != -EINVAL)
+ return rc;
+
+ /* an integrity failure may mean the TPM has been reset */
+ dev_err(&chip->dev, "NULL key integrity failure!\n");
+ /* check the null name against what we know */
+ tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name);
+ if (memcmp(name, chip->null_key_name, sizeof(name)) == 0)
+ /* name unchanged, assume transient integrity failure */
+ return rc;
+ /*
+ * Fatal TPM failure: the NULL seed has actually changed, so
+ * the TPM must have been illegally reset. All in-kernel TPM
+ * operations will fail because the NULL primary can't be
+ * loaded to salt the sessions, but disable the TPM anyway so
+ * userspace programs can't be compromised by it.
+ */
+ dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n");
+ chip->flags |= TPM_CHIP_FLAG_DISABLE;
+
+ return rc;
+}
+
+/**
+ * tpm2_start_auth_session() - create a HMAC authentication session with the TPM
+ * @chip: the TPM chip structure to create the session with
+ *
+ * This function loads the NULL seed from its saved context and starts
+ * an authentication session on the null seed, fills in the
+ * @chip->auth structure to contain all the session details necessary
+ * for performing the HMAC, encrypt and decrypt operations and
+ * returns. The NULL seed is flushed before this function returns.
+ *
+ * Return: zero on success or actual error encountered.
+ */
+int tpm2_start_auth_session(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ struct tpm2_auth *auth = chip->auth;
+ int rc;
+ u32 null_key;
+
+ rc = tpm2_load_null(chip, &null_key);
+ if (rc)
+ goto out;
+
+ auth->session = TPM_HEADER_SIZE;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_START_AUTH_SESS);
+ if (rc)
+ goto out;
+
+ /* salt key handle */
+ tpm_buf_append_u32(&buf, null_key);
+ /* bind key handle */
+ tpm_buf_append_u32(&buf, TPM2_RH_NULL);
+ /* nonce caller */
+ get_random_bytes(auth->our_nonce, sizeof(auth->our_nonce));
+ tpm_buf_append_u16(&buf, sizeof(auth->our_nonce));
+ tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce));
+
+ /* append encrypted salt and squirrel away unencrypted in auth */
+ tpm_buf_append_salt(&buf, chip);
+ /* session type (HMAC, audit or policy) */
+ tpm_buf_append_u8(&buf, TPM2_SE_HMAC);
+
+ /* symmetric encryption parameters */
+ /* symmetric algorithm */
+ tpm_buf_append_u16(&buf, TPM_ALG_AES);
+ /* bits for symmetric algorithm */
+ tpm_buf_append_u16(&buf, AES_KEY_BITS);
+ /* symmetric algorithm mode (must be CFB) */
+ tpm_buf_append_u16(&buf, TPM_ALG_CFB);
+ /* hash algorithm for session */
+ tpm_buf_append_u16(&buf, TPM_ALG_SHA256);
+
+ rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session");
+ tpm2_flush_context(chip, null_key);
+
+ if (rc == TPM2_RC_SUCCESS)
+ rc = tpm2_parse_start_auth_session(auth, &buf);
+
+ tpm_buf_destroy(&buf);
+
+ out:
+ return rc;
+}
+EXPORT_SYMBOL(tpm2_start_auth_session);
+
+/**
+ * tpm2_parse_create_primary() - parse the data returned from TPM_CC_CREATE_PRIMARY
+ *
+ * @chip: The TPM the primary was created under
+ * @buf: The response buffer from the chip
+ * @handle: pointer to be filled in with the return handle of the primary
+ * @hierarchy: The hierarchy the primary was created for
+ * @name: pointer to be filled in with the primary key name
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
+ */
+static int tpm2_parse_create_primary(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 *handle, u32 hierarchy, u8 *name)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ off_t offset_r = TPM_HEADER_SIZE, offset_t;
+ u16 len = TPM_HEADER_SIZE;
+ u32 total_len = be32_to_cpu(head->length);
+ u32 val, param_len, keyhandle;
+
+ keyhandle = tpm_buf_read_u32(buf, &offset_r);
+ if (handle)
+ *handle = keyhandle;
+ else
+ tpm2_flush_context(chip, keyhandle);
+
+ param_len = tpm_buf_read_u32(buf, &offset_r);
+ /*
+ * param_len doesn't include the header, but all the other
+ * lengths and offsets do, so add it to parm len to make
+ * the comparisons easier
+ */
+ param_len += TPM_HEADER_SIZE;
+
+ if (param_len + 8 > total_len)
+ return -EINVAL;
+ len = tpm_buf_read_u16(buf, &offset_r);
+ offset_t = offset_r;
+ if (name) {
+ /*
+ * now we have the public area, compute the name of
+ * the object
+ */
+ put_unaligned_be16(TPM_ALG_SHA256, name);
+ sha256(&buf->data[offset_r], len, name + 2);
+ }
+
+ /* validate the public key */
+ val = tpm_buf_read_u16(buf, &offset_t);
+
+ /* key type (must be what we asked for) */
+ if (val != TPM_ALG_ECC)
+ return -EINVAL;
+ val = tpm_buf_read_u16(buf, &offset_t);
+
+ /* name algorithm */
+ if (val != TPM_ALG_SHA256)
+ return -EINVAL;
+ val = tpm_buf_read_u32(buf, &offset_t);
+
+ /* object properties */
+ if (val != TPM2_OA_TMPL)
+ return -EINVAL;
+
+ /* auth policy (empty) */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != 0)
+ return -EINVAL;
+
+ /* symmetric key parameters */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM_ALG_AES)
+ return -EINVAL;
+
+ /* symmetric key length */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != AES_KEY_BITS)
+ return -EINVAL;
+
+ /* symmetric encryption scheme */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM_ALG_CFB)
+ return -EINVAL;
+
+ /* signing scheme */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM_ALG_NULL)
+ return -EINVAL;
+
+ /* ECC Curve */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM2_ECC_NIST_P256)
+ return -EINVAL;
+
+ /* KDF Scheme */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != TPM_ALG_NULL)
+ return -EINVAL;
+
+ /* extract public key (x and y points) */
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != EC_PT_SZ)
+ return -EINVAL;
+ memcpy(chip->null_ec_key_x, &buf->data[offset_t], val);
+ offset_t += val;
+ val = tpm_buf_read_u16(buf, &offset_t);
+ if (val != EC_PT_SZ)
+ return -EINVAL;
+ memcpy(chip->null_ec_key_y, &buf->data[offset_t], val);
+ offset_t += val;
+
+ /* original length of the whole TPM2B */
+ offset_r += len;
+
+ /* should have exactly consumed the TPM2B public structure */
+ if (offset_t != offset_r)
+ return -EINVAL;
+ if (offset_r > param_len)
+ return -EINVAL;
+
+ /* creation data (skip) */
+ len = tpm_buf_read_u16(buf, &offset_r);
+ offset_r += len;
+ if (offset_r > param_len)
+ return -EINVAL;
+
+ /* creation digest (must be sha256) */
+ len = tpm_buf_read_u16(buf, &offset_r);
+ offset_r += len;
+ if (len != SHA256_DIGEST_SIZE || offset_r > param_len)
+ return -EINVAL;
+
+ /* TPMT_TK_CREATION follows */
+	/* tag, must be TPM2_ST_CREATION (0x8021) */
+ val = tpm_buf_read_u16(buf, &offset_r);
+ if (val != TPM2_ST_CREATION || offset_r > param_len)
+ return -EINVAL;
+
+ /* hierarchy */
+ val = tpm_buf_read_u32(buf, &offset_r);
+ if (val != hierarchy || offset_r > param_len)
+ return -EINVAL;
+
+ /* the ticket digest HMAC (might not be sha256) */
+ len = tpm_buf_read_u16(buf, &offset_r);
+ offset_r += len;
+ if (offset_r > param_len)
+ return -EINVAL;
+
+ /*
+ * finally we have the name, which is a sha256 digest plus a 2
+ * byte algorithm type
+ */
+ len = tpm_buf_read_u16(buf, &offset_r);
+ if (offset_r + len != param_len + 8)
+ return -EINVAL;
+ if (len != SHA256_DIGEST_SIZE + 2)
+ return -EINVAL;
+
+ if (memcmp(chip->null_key_name, &buf->data[offset_r],
+ SHA256_DIGEST_SIZE + 2) != 0) {
+ dev_err(&chip->dev, "NULL Seed name comparison failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * tpm2_create_primary() - create a primary key using a fixed P-256 template
+ *
+ * @chip: the TPM chip to create under
+ * @hierarchy: The hierarchy handle to create under
+ * @handle: The returned volatile handle on success
+ * @name: The name of the returned key
+ *
+ * For platforms that might not have a persistent primary, this can be
+ * used to create one quickly on the fly (it uses Elliptic Curve not
+ * RSA, so even slow TPMs can create one fast). The template uses the
+ * TCG-mandated H one for non-endorsement ECC primaries, i.e. the P-256
+ * elliptic curve (the only current one all TPM2s are required to
+ * have), a sha256 name hash and no policy.
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
+ */
+static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
+ u32 *handle, u8 *name)
+{
+ int rc;
+ struct tpm_buf buf;
+ struct tpm_buf template;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE_PRIMARY);
+ if (rc)
+ return rc;
+
+ rc = tpm_buf_init_sized(&template);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
+
+ /*
+ * create the template. Note: in order for userspace to
+ * verify the security of the system, it will have to create
+ * and certify this NULL primary, meaning all the template
+ * parameters will have to be identical, so conform exactly to
+ * the TCG TPM v2.0 Provisioning Guidance for the SRK ECC
+ * key H template (H has zero size unique points)
+ */
+
+ /* key type */
+ tpm_buf_append_u16(&template, TPM_ALG_ECC);
+
+ /* name algorithm */
+ tpm_buf_append_u16(&template, TPM_ALG_SHA256);
+
+ /* object properties */
+ tpm_buf_append_u32(&template, TPM2_OA_TMPL);
+
+	/* auth policy (empty) */
+ tpm_buf_append_u16(&template, 0);
+
+	/* BEGIN parameters: key specific; for ECC */
+
+ /* symmetric algorithm */
+ tpm_buf_append_u16(&template, TPM_ALG_AES);
+
+ /* bits for symmetric algorithm */
+ tpm_buf_append_u16(&template, AES_KEY_BITS);
+
+ /* algorithm mode (must be CFB) */
+ tpm_buf_append_u16(&template, TPM_ALG_CFB);
+
+ /* scheme (NULL means any scheme) */
+ tpm_buf_append_u16(&template, TPM_ALG_NULL);
+
+ /* ECC Curve ID */
+ tpm_buf_append_u16(&template, TPM2_ECC_NIST_P256);
+
+ /* KDF Scheme */
+ tpm_buf_append_u16(&template, TPM_ALG_NULL);
+
+ /* unique: key specific; for ECC it is two zero size points */
+ tpm_buf_append_u16(&template, 0);
+ tpm_buf_append_u16(&template, 0);
+
+ /* END parameters */
+
+ /* primary handle */
+ tpm_buf_append_u32(&buf, hierarchy);
+ tpm_buf_append_empty_auth(&buf, TPM2_RS_PW);
+
+ /* sensitive create size is 4 for two empty buffers */
+ tpm_buf_append_u16(&buf, 4);
+
+ /* sensitive create auth data (empty) */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* sensitive create sensitive data (empty) */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* the public template */
+ tpm_buf_append(&buf, template.data, template.length);
+ tpm_buf_destroy(&template);
+
+ /* outside info (empty) */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* creation PCR (none) */
+ tpm_buf_append_u32(&buf, 0);
+
+ rc = tpm_transmit_cmd(chip, &buf, 0,
+ "attempting to create NULL primary");
+
+ if (rc == TPM2_RC_SUCCESS)
+ rc = tpm2_parse_create_primary(chip, &buf, handle, hierarchy,
+ name);
+
+ tpm_buf_destroy(&buf);
+
+ return rc;
+}
+
+static int tpm2_create_null_primary(struct tpm_chip *chip)
+{
+ u32 null_key;
+ int rc;
+
+ rc = tpm2_create_primary(chip, TPM2_RH_NULL, &null_key,
+ chip->null_key_name);
+
+ if (rc == TPM2_RC_SUCCESS) {
+ unsigned int offset = 0; /* dummy offset for null key context */
+
+ rc = tpm2_save_context(chip, null_key, chip->null_key_context,
+ sizeof(chip->null_key_context), &offset);
+ tpm2_flush_context(chip, null_key);
+ }
+
+ return rc;
+}
+
+/**
+ * tpm2_sessions_init() - start of day initialization for the sessions code
+ * @chip: TPM chip
+ *
+ * Derive and context save the null primary and allocate memory in the
+ * struct tpm_chip for the authorizations.
+ */
+int tpm2_sessions_init(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm2_create_null_primary(chip);
+ if (rc)
+ dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc);
+
+ chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL);
+ if (!chip->auth)
+ return -ENOMEM;
+
+ return rc;
+}
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 363afdd4d1d3..4892d491da8d 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -68,8 +68,8 @@ void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
kfree(space->session_buf);
}
-static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
- unsigned int *offset, u32 *handle)
+int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
+ unsigned int *offset, u32 *handle)
{
struct tpm_buf tbuf;
struct tpm2_context *ctx;
@@ -105,6 +105,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
*handle = 0;
tpm_buf_destroy(&tbuf);
return -ENOENT;
+ } else if (tpm2_rc_value(rc) == TPM2_RC_INTEGRITY) {
+ tpm_buf_destroy(&tbuf);
+ return -EINVAL;
} else if (rc > 0) {
dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
__func__, rc);
@@ -119,8 +122,8 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
return 0;
}
-static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
- unsigned int buf_size, unsigned int *offset)
+int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf,
+ unsigned int buf_size, unsigned int *offset)
{
struct tpm_buf tbuf;
unsigned int body_size;
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 9c924a1440a9..2d2ae37153ba 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -51,34 +51,40 @@ static struct tpm_inf_dev tpm_dev;
static inline void tpm_data_out(unsigned char data, unsigned char offset)
{
+#ifdef CONFIG_HAS_IOPORT
if (tpm_dev.iotype == TPM_INF_IO_PORT)
outb(data, tpm_dev.data_regs + offset);
else
+#endif
writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset);
}
static inline unsigned char tpm_data_in(unsigned char offset)
{
+#ifdef CONFIG_HAS_IOPORT
if (tpm_dev.iotype == TPM_INF_IO_PORT)
return inb(tpm_dev.data_regs + offset);
- else
- return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
+#endif
+ return readb(tpm_dev.mem_base + tpm_dev.data_regs + offset);
}
static inline void tpm_config_out(unsigned char data, unsigned char offset)
{
+#ifdef CONFIG_HAS_IOPORT
if (tpm_dev.iotype == TPM_INF_IO_PORT)
outb(data, tpm_dev.config_port + offset);
else
+#endif
writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset);
}
static inline unsigned char tpm_config_in(unsigned char offset)
{
+#ifdef CONFIG_HAS_IOPORT
if (tpm_dev.iotype == TPM_INF_IO_PORT)
return inb(tpm_dev.config_port + offset);
- else
- return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
+#endif
+ return readb(tpm_dev.mem_base + tpm_dev.index_off + offset);
}
/* TPM header definitions */
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 714070ebb6e7..176cd8dbf1db 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1057,11 +1057,6 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
clkrun_val &= ~LPC_CLKRUN_EN;
iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
} else {
data->clkrun_enabled--;
if (data->clkrun_enabled)
@@ -1072,13 +1067,15 @@ static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
/* Enable LPC CLKRUN# */
clkrun_val |= LPC_CLKRUN_EN;
iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
}
+
+#ifdef CONFIG_HAS_IOPORT
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+#endif
}
static const struct tpm_class_ops tpm_tis = {
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 50af5fc7f570..7517a0dfd15c 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -451,8 +451,8 @@ config COMMON_CLK_FIXED_MMIO
config COMMON_CLK_K210
bool "Clock driver for the Canaan Kendryte K210 SoC"
- depends on OF && RISCV && SOC_CANAAN
- default SOC_CANAAN
+ depends on OF && RISCV && SOC_CANAAN_K210
+ default SOC_CANAAN_K210
help
Support for the Canaan Kendryte K210 RISC-V SoC clocks.
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 25371c91a58f..8cca52be993f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+ int ret;
+ struct clk_core *core, *failed;
+
+ /*
+ * Grab the list lock to prevent any new clks from being registered
+ * or unregistered until clk_pm_runtime_put_all().
+ */
+ mutex_lock(&clk_rpm_list_lock);
+
+ /*
+ * Runtime PM "get" all the devices that are needed for the clks
+ * currently registered. Do this without holding the prepare_lock, to
+ * avoid the deadlock.
+ */
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ ret = clk_pm_runtime_get(core);
+ if (ret) {
+ failed = core;
+ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+ dev_name(failed->dev), failed->name);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ if (core == failed)
+ break;
+
+ clk_pm_runtime_put(core);
+ }
+ mutex_unlock(&clk_rpm_list_lock);
+
+ return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+ clk_pm_runtime_put(core);
+ mutex_unlock(&clk_rpm_list_lock);
+}
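+
+/*
+ * Callers bracket a prepare_lock-protected walk of the clk tree with
+ * these helpers, e.g. (a sketch of the pattern in clk_disable_unused()):
+ *
+ * ret = clk_pm_runtime_get_all();
+ * if (ret)
+ *         return ret;
+ * clk_prepare_lock();
+ * ... iterate over clks ...
+ * clk_prepare_unlock();
+ * clk_pm_runtime_put_all();
+ */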
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+ struct device *dev = core->dev;
+
+ if (dev && pm_runtime_enabled(dev)) {
+ core->rpm_enabled = true;
+
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_add_head(&core->rpm_node, &clk_rpm_list);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -1381,9 +1469,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
- if (clk_pm_runtime_get(core))
- return;
-
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -1392,8 +1477,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
-
- clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1409,9 +1492,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
- if (clk_pm_runtime_get(core))
- goto unprepare_out;
-
flags = clk_enable_lock();
if (core->enable_count)
@@ -1436,8 +1516,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
- clk_pm_runtime_put(core);
-unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1453,6 +1531,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
+ int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
@@ -1461,6 +1540,13 @@ static int __init clk_disable_unused(void)
pr_info("clk: Disabling unused clocks\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+ /*
+ * Grab the prepare lock to keep the clk topology stable while iterating
+ * over clks.
+ */
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1477,6 +1563,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
return 0;
}
late_initcall_sync(clk_disable_unused);
@@ -3252,9 +3340,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
- clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
- clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -3264,11 +3350,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = s->private;
+ int ret;
seq_puts(s, " enable prepare protect duty hardware connection\n");
seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
clk_prepare_lock();
@@ -3277,6 +3367,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
return 0;
}
@@ -3324,8 +3415,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
struct clk_core *c;
bool first_node = true;
struct hlist_head **lists = s->private;
+ int ret;
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
seq_putc(s, '{');
+
clk_prepare_lock();
for (; *lists; lists++) {
@@ -3338,6 +3435,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
@@ -3981,8 +4079,6 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
-
- kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@@ -4211,6 +4307,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+ struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+ if (core->rpm_enabled) {
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_del(&core->rpm_node);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+
+ clk_core_free_parent_map(core);
+ kfree_const(core->name);
+ kfree(core);
+}
+
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@@ -4231,6 +4343,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
+ kref_init(&core->ref);
+
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@@ -4243,9 +4357,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
- if (dev && pm_runtime_enabled(dev))
- core->rpm_enabled = true;
core->dev = dev;
+ clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@@ -4285,12 +4398,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
- clk_core_free_parent_map(core);
fail_parents:
fail_ops:
- kfree_const(core->name);
fail_name:
- kfree(core);
+ kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@@ -4370,18 +4481,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
- struct clk_core *core = container_of(ref, struct clk_core, ref);
-
- lockdep_assert_held(&prepare_lock);
-
- clk_core_free_parent_map(core);
- kfree_const(core->name);
- kfree(core);
-}
-
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
@@ -4472,7 +4571,8 @@ void clk_unregister(struct clk *clk)
if (ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
- goto unlock;
+ clk_prepare_unlock();
+ return;
}
/*
* Assign empty clock ops for consumers that might still hold
@@ -4506,11 +4606,10 @@ void clk_unregister(struct clk *clk)
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
+ clk_prepare_unlock();
kref_put(&clk->core->ref, __clk_release);
free_clk(clk);
-unlock:
- clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
@@ -4669,13 +4768,11 @@ void __clk_put(struct clk *clk)
if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
- owner = clk->core->owner;
- kref_put(&clk->core->ref, __clk_release);
-
clk_prepare_unlock();
+ owner = clk->core->owner;
+ kref_put(&clk->core->ref, __clk_release);
module_put(owner);
-
free_clk(clk);
}
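The pattern above takes runtime-PM references on every listed clk provider up front, rolls back the partial set on failure, and on success leaves the list lock held until the matching put_all. A minimal sketch of that idiom, with illustrative names ('example_dev', 'example_lock') rather than the clk core's own symbols:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>

struct example_dev {
	struct device *dev;
	struct list_head node;
};

static LIST_HEAD(example_dev_list);
static DEFINE_MUTEX(example_lock);

/* Resume every device on the list; on failure, roll back and unlock. */
static int example_runtime_get_all(void)
{
	struct example_dev *ed, *failed = NULL;
	int ret = 0;

	mutex_lock(&example_lock);
	list_for_each_entry(ed, &example_dev_list, node) {
		ret = pm_runtime_resume_and_get(ed->dev);
		if (ret) {
			failed = ed;
			break;
		}
	}

	if (!failed)
		return 0;	/* success: the lock stays held, as above */

	/* Drop only the references taken before the failing entry. */
	list_for_each_entry(ed, &example_dev_list, node) {
		if (ed == failed)
			break;
		pm_runtime_put(ed->dev);
	}
	mutex_unlock(&example_lock);

	return ret;
}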
diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c
index 449041f8abbc..c8c023afe3e5 100644
--- a/drivers/clk/mediatek/clk-mt7988-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c
@@ -156,7 +156,7 @@ static const struct mtk_gate infra_clks[] = {
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
"csw_infra_f26m_sel", 8),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
- "csw_infra_f26m_sel", 9),
+ "infra_pcie_peri_ck_26m_ck_p3", 9),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
"csw_infra_f26m_sel", 10),
/* INFRA1 */
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 2e55368dc4d8..bd37ab4d1a9b 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-mtk.h"
@@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
}
+
+ devm_pm_runtime_enable(&pdev->dev);
+ /*
+ * Do a pm_runtime_resume_and_get() to work around a possible
+ * deadlock between clk_register() and the genpd framework.
+ */
+ r = pm_runtime_resume_and_get(&pdev->dev);
+ if (r)
+ return r;
+
/* Calculate how many clk_hw_onecell_data entries to allocate */
num_clks = mcd->num_clks + mcd->num_composite_clks;
num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
@@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
goto unregister_clks;
}
+ pm_runtime_put(&pdev->dev);
+
return r;
unregister_clks:
@@ -604,6 +617,8 @@ free_data:
free_base:
if (mcd->shared_io && base)
iounmap(base);
+
+ pm_runtime_put(&pdev->dev);
return r;
}
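The bracket above keeps the provider resumed across clk registration to dodge the clk/genpd lock inversion, and drops the reference on both the success and error paths. A minimal sketch, assuming a hypothetical example_register_clks() helper:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_register_clks(struct platform_device *pdev); /* hypothetical */

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	/* Keep the device resumed while clks are registered. */
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		return ret;

	ret = example_register_clks(pdev);

	pm_runtime_put(&pdev->dev);	/* dropped on success and failure alike */
	return ret;
}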
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 8602c02047d0..45c5255bcd11 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -768,6 +768,7 @@ static struct clk_smd_rpm *msm8976_clks[] = {
static const struct rpm_smd_clk_desc rpm_clk_msm8976 = {
.clks = msm8976_clks,
+ .num_clks = ARRAY_SIZE(msm8976_clks),
.icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
.num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
};
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index e7a4068b9f39..df9618ab7eea 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -487,9 +487,14 @@ int gdsc_register(struct gdsc_desc *desc,
if (!scs[i] || !scs[i]->supply)
continue;
- scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
- if (IS_ERR(scs[i]->rsupply))
- return PTR_ERR(scs[i]->rsupply);
+ scs[i]->rsupply = devm_regulator_get_optional(dev, scs[i]->supply);
+ if (IS_ERR(scs[i]->rsupply)) {
+ ret = PTR_ERR(scs[i]->rsupply);
+ if (ret != -ENODEV)
+ return ret;
+
+ scs[i]->rsupply = NULL;
+ }
}
data->num_domains = num;
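The gdsc change above treats a missing supply as genuinely optional: only -ENODEV (supply not described in DT) is absorbed, while anything else, including -EPROBE_DEFER, still fails the probe. A minimal sketch of that pattern; the "vdd" supply name is illustrative:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_optional_supply(struct device *dev,
				       struct regulator **rsupply)
{
	struct regulator *r = devm_regulator_get_optional(dev, "vdd");

	if (IS_ERR(r)) {
		/* Only "not described in DT" is tolerated. */
		if (PTR_ERR(r) != -ENODEV)
			return PTR_ERR(r);
		r = NULL;	/* absent supply: carry on without it */
	}

	*rsupply = r;
	return 0;
}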
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 3484e6cc80ad..503c6f5b20d5 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -13,9 +13,9 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-#include <linux/property.h>
#define EXYNOS_CLKOUT_NR_CLKS 1
#define EXYNOS_CLKOUT_PARENTS 32
@@ -84,17 +84,24 @@ MODULE_DEVICE_TABLE(of, exynos_clkout_ids);
static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask)
{
const struct exynos_clkout_variant *variant;
+ const struct of_device_id *match;
if (!dev->parent) {
dev_err(dev, "not instantiated from MFD\n");
return -EINVAL;
}
- variant = device_get_match_data(dev->parent);
- if (!variant) {
+ /*
+ * The 'exynos_clkout_ids' array is not the ids array matched by
+ * the dev->parent driver, so of_device_get_match_data() or
+ * device_get_match_data() cannot be used here.
+ */
+ match = of_match_device(exynos_clkout_ids, dev->parent);
+ if (!match) {
dev_err(dev, "cannot match parent device\n");
return -EINVAL;
}
+ variant = match->data;
*mux_mask = variant->mux_mask;
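Because the parent device was bound through a different driver's match table, its match data must be looked up explicitly against this driver's own table instead of via device_get_match_data(). A minimal sketch with an illustrative 'example_ids' table:

#include <linux/of_device.h>
#include <linux/types.h>

struct example_variant {
	u32 mux_mask;
};

static const struct example_variant example_data = { .mux_mask = 0xff };

static const struct of_device_id example_ids[] = {
	{ .compatible = "vendor,example", .data = &example_data },
	{ }
};

/* Look up match data for dev->parent against our own table. */
static const struct example_variant *example_match_parent(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(example_ids, dev->parent);

	return match ? match->data : NULL;
}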
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 8951ffc14ff5..6a4b2b9ef30a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -182,6 +182,8 @@ static struct ccu_nkm pll_mipi_clk = {
&ccu_nkm_ops,
CLK_SET_RATE_UNGATE | CLK_SET_RATE_PARENT),
.features = CCU_FEATURE_CLOSEST_RATE,
+ .min_rate = 500000000,
+ .max_rate = 1400000000,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 42568c616181..892df807275c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1181,11 +1181,18 @@ static const u32 usb2_clk_regs[] = {
SUN50I_H6_USB3_CLK_REG,
};
+static struct ccu_mux_nb sun50i_h6_cpu_nb = {
+ .common = &cpux_clk.common,
+ .cm = &cpux_clk.mux,
+ .delay_us = 1,
+ .bypass_index = 0, /* index of 24 MHz oscillator */
+};
+
static int sun50i_h6_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
+ int i, ret;
u32 val;
- int i;
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
@@ -1252,7 +1259,15 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
val |= BIT(24);
writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG);
- return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
+ if (ret)
+ return ret;
+
+ /* Reparent CPU during PLL CPUX rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun50i_h6_cpu_nb);
+
+ return 0;
}
static const struct of_device_id sun50i_h6_ccu_ids[] = {
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 8babce55302f..ac0091b4ce24 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -44,6 +44,16 @@ bool ccu_is_better_rate(struct ccu_common *common,
unsigned long current_rate,
unsigned long best_rate)
{
+ unsigned long min_rate, max_rate;
+
+ clk_hw_get_rate_range(&common->hw, &min_rate, &max_rate);
+
+ if (current_rate > max_rate)
+ return false;
+
+ if (current_rate < min_rate)
+ return false;
+
if (common->features & CCU_FEATURE_CLOSEST_RATE)
return abs(current_rate - target_rate) < abs(best_rate - target_rate);
@@ -122,6 +132,7 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
for (i = 0; i < desc->hw_clks->num ; i++) {
struct clk_hw *hw = desc->hw_clks->hws[i];
+ struct ccu_common *common = hw_to_ccu_common(hw);
const char *name;
if (!hw)
@@ -136,6 +147,14 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
pr_err("Couldn't register clock %d - %s\n", i, name);
goto err_clk_unreg;
}
+
+ if (common->max_rate)
+ clk_hw_set_rate_range(hw, common->min_rate,
+ common->max_rate);
+ else
+ WARN(common->min_rate,
+ "No max_rate, ignoring min_rate of clock %d - %s\n",
+ i, name);
}
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index 942a72c09437..329734f8cf42 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -31,6 +31,9 @@ struct ccu_common {
u16 lock_reg;
u32 prediv;
+ unsigned long min_rate;
+ unsigned long max_rate;
+
unsigned long features;
spinlock_t *lock;
struct clk_hw hw;
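With the hunks above, ccu_is_better_rate() rejects any candidate outside the clk's registered rate window before comparing closeness to the target. A minimal sketch of the range check, assuming an already-registered clk_hw:

#include <linux/clk-provider.h>

/* Reject candidates outside the clk's registered [min, max] window. */
static bool example_rate_in_range(struct clk_hw *hw, unsigned long rate)
{
	unsigned long min_rate, max_rate;

	clk_hw_get_rate_range(hw, &min_rate, &max_rate);

	return rate >= min_rate && rate <= max_rate;
}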
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 8d4a52056684..5bb43cc1a8df 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -331,7 +331,7 @@ static u64 notrace hisi_161010101_read_cntvct_el0(void)
return __hisi_161010101_read_reg(cntvct_el0);
}
-static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
+static const struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
/*
* Note that trailing spaces are required to properly match
* the OEM table information.
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 8da972dc1713..3fcbd02b2483 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -210,6 +210,7 @@ static int __init ostm_init(struct device_node *np)
pr_info("%pOF: used for clock events\n", np);
}
+ of_node_set_flag(np, OF_POPULATED);
return 0;
err_cleanup:
@@ -224,7 +225,7 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#ifdef CONFIG_ARCH_RZG2L
+#if defined(CONFIG_ARCH_RZG2L) || defined(CONFIG_ARCH_R9A09G057)
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 56acf2617262..b7a34b1a975e 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -129,7 +129,6 @@ struct dmtimer {
void __iomem *func_base; /* function register base */
atomic_t enabled;
- unsigned long rate;
unsigned reserved:1;
unsigned posted:1;
unsigned omap1:1;
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4536ed43f65b..84dce5184a77 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2)
- return -ENODEV;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep_desc) ||
- usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
+ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+ int ret;
- if (usb_endpoint_is_int_out(ep_desc) ||
- usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
+ if (devpriv->model == VMK8061_MODEL)
+ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+ &ep_tx_desc, NULL, NULL);
+ else
+ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+ &ep_rx_desc, &ep_tx_desc);
- if (!devpriv->ep_rx || !devpriv->ep_tx)
+ if (ret)
return -ENODEV;
+ devpriv->ep_rx = ep_rx_desc;
+ devpriv->ep_tx = ep_tx_desc;
+
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;
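The hand-rolled endpoint loop is replaced by usb_find_common_endpoints(), which fails unless every requested endpoint exists. A minimal sketch of the bulk-pair lookup; the NULL slots skip the interrupt-in/out searches:

#include <linux/usb.h>

static int example_find_bulk_pair(struct usb_host_interface *alt,
				  struct usb_endpoint_descriptor **rx,
				  struct usb_endpoint_descriptor **tx)
{
	/* Fills rx/tx with the first bulk-in/bulk-out descriptors. */
	return usb_find_common_endpoints(alt, rx, tx, NULL, NULL);
}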
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 2015c9fcc3c9..6a342b0c0140 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -50,7 +50,8 @@
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
-#define AMD_PSTATE_PREFCORE_THRESHOLD 166
+#define CPPC_HIGHEST_PERF_PERFORMANCE 196
+#define CPPC_HIGHEST_PERF_DEFAULT 166
/*
* TODO: We need more time to fine tune processors with shared memory solution
@@ -67,6 +68,7 @@ static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
+static struct quirk_entry *quirks;
/*
* AMD Energy Preference Performance (EPP)
@@ -111,6 +113,41 @@ static unsigned int epp_values[] = {
typedef int (*cppc_mode_transition_fn)(int);
+static struct quirk_entry quirk_amd_7k62 = {
+ .nominal_freq = 2600,
+ .lowest_freq = 550,
+};
+
+static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
+{
+ /*
+ * Match the broken BIOSes for family 17h processors that support
+ * CPPC V2: those BIOSes lack the nominal_freq and lowest_freq
+ * capability definitions in their ACPI tables.
+ */
+ if (boot_cpu_has(X86_FEATURE_ZEN2)) {
+ quirks = dmi->driver_data;
+ pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
+ {
+ .callback = dmi_matched_7k62_bios_bug,
+ .ident = "AMD EPYC 7K62",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
+ DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
+ },
+ .driver_data = &quirk_amd_7k62,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
+
static inline int get_mode_idx_from_str(const char *str, size_t size)
{
int i;
@@ -290,6 +327,21 @@ static inline int amd_pstate_enable(bool enable)
return static_call(amd_pstate_enable)(enable);
}
+static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ /*
+ * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
+ * the highest performance level is set to 196.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218759
+ */
+ if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
+ return CPPC_HIGHEST_PERF_PERFORMANCE;
+
+ return CPPC_HIGHEST_PERF_DEFAULT;
+}
+
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
@@ -306,7 +358,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
* the default max perf.
*/
if (cpudata->hw_prefcore)
- highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
+ highest_perf = amd_pstate_highest_perf_set(cpudata);
else
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
@@ -330,7 +382,7 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
return ret;
if (cpudata->hw_prefcore)
- highest_perf = AMD_PSTATE_PREFCORE_THRESHOLD;
+ highest_perf = amd_pstate_highest_perf_set(cpudata);
else
highest_perf = cppc_perf.highest_perf;
@@ -604,78 +656,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
cpufreq_cpu_put(policy);
}
-static int amd_get_min_freq(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_caps cppc_perf;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- /* Switch to khz */
- return cppc_perf.lowest_freq * 1000;
-}
-
-static int amd_get_max_freq(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_caps cppc_perf;
- u32 max_perf, max_freq, nominal_freq, nominal_perf;
- u64 boost_ratio;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
-
- max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
-
- /* Switch to khz */
- return max_freq * 1000;
-}
-
-static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_caps cppc_perf;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- /* Switch to khz */
- return cppc_perf.nominal_freq * 1000;
-}
-
-static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_caps cppc_perf;
- u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
- nominal_freq, nominal_perf;
- u64 lowest_nonlinear_ratio;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
-
- lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
-
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
-
- lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
-
- /* Switch to khz */
- return lowest_nonlinear_freq * 1000;
-}
-
static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -828,9 +808,93 @@ free_cpufreq_put:
mutex_unlock(&amd_pstate_driver_lock);
}
+/*
+ * Get the pstate transition delay time from the ACPI tables set by
+ * firmware instead of using a hardcoded value directly.
+ */
+static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
+{
+ u32 transition_delay_ns;
+
+ transition_delay_ns = cppc_get_transition_latency(cpu);
+ if (transition_delay_ns == CPUFREQ_ETERNAL)
+ return AMD_PSTATE_TRANSITION_DELAY;
+
+ return transition_delay_ns / NSEC_PER_USEC;
+}
+
+/*
+ * Get the pstate transition latency value from the ACPI tables set by
+ * firmware instead of using a hardcoded value directly.
+ */
+static u32 amd_pstate_get_transition_latency(unsigned int cpu)
+{
+ u32 transition_latency;
+
+ transition_latency = cppc_get_transition_latency(cpu);
+ if (transition_latency == CPUFREQ_ETERNAL)
+ return AMD_PSTATE_TRANSITION_LATENCY;
+
+ return transition_latency;
+}
+
+/*
+ * amd_pstate_init_freq: Initialize the max_freq, min_freq,
+ * nominal_freq and lowest_nonlinear_freq for
+ * the @cpudata object.
+ *
+ * Requires: highest_perf, lowest_perf, nominal_perf and
+ * lowest_nonlinear_perf members of @cpudata to be
+ * initialized.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
+{
+ int ret;
+ u32 min_freq;
+ u32 highest_perf, max_freq;
+ u32 nominal_perf, nominal_freq;
+ u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
+ u32 boost_ratio, lowest_nonlinear_ratio;
+ struct cppc_perf_caps cppc_perf;
+
+ ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ if (quirks && quirks->lowest_freq)
+ min_freq = quirks->lowest_freq * 1000;
+ else
+ min_freq = cppc_perf.lowest_freq * 1000;
+
+ if (quirks && quirks->nominal_freq)
+ nominal_freq = quirks->nominal_freq;
+ else
+ nominal_freq = cppc_perf.nominal_freq;
+
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+
+ highest_perf = READ_ONCE(cpudata->highest_perf);
+ boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+ max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+
+ lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
+ nominal_perf);
+ lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+
+ WRITE_ONCE(cpudata->min_freq, min_freq);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
+ WRITE_ONCE(cpudata->max_freq, max_freq);
+
+ return 0;
+}
+
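The max_freq computation above is 10-bit fixed point: the perf ratio is scaled up by 2^SCHED_CAPACITY_SHIFT, multiplied into the nominal frequency, then shifted back out. A worked userspace example with illustrative numbers (nominal_perf 120, highest_perf 166, nominal_freq 2600 MHz), not values from any real CPU:

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT 10

int main(void)
{
	uint64_t nominal_perf = 120, highest_perf = 166;
	uint64_t nominal_freq = 2600;			/* MHz */

	uint64_t boost_ratio = (highest_perf << SCHED_CAPACITY_SHIFT) /
			       nominal_perf;		/* 1416, i.e. ~1.383 */
	uint64_t max_freq_khz = ((nominal_freq * boost_ratio) >>
				 SCHED_CAPACITY_SHIFT) * 1000;

	printf("max_freq = %llu kHz\n",
	       (unsigned long long)max_freq_khz);	/* 3595000 kHz */
	return 0;
}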
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ int min_freq, max_freq, nominal_freq, ret;
struct device *dev;
struct amd_cpudata *cpudata;
@@ -855,20 +919,25 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
+ if (min_freq <= 0 || max_freq <= 0 ||
+ nominal_freq <= 0 || min_freq > max_freq) {
+ dev_err(dev,
+ "min_freq(%d) or max_freq(%d) or nominal_freq (%d) value is incorrect, check _CPC in ACPI tables\n",
+ min_freq, max_freq, nominal_freq);
ret = -EINVAL;
goto free_cpudata1;
}
- policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
- policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
+ policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
+ policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
policy->min = min_freq;
policy->max = max_freq;
@@ -896,13 +965,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
cpudata->max_limit_freq = max_freq;
cpudata->min_limit_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
policy->driver_data = cpudata;
@@ -966,7 +1030,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
int max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
- max_freq = amd_get_max_freq(cpudata);
+ max_freq = READ_ONCE(cpudata->max_freq);
if (max_freq < 0)
return max_freq;
@@ -979,7 +1043,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
int freq;
struct amd_cpudata *cpudata = policy->driver_data;
- freq = amd_get_lowest_nonlinear_freq(cpudata);
+ freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
if (freq < 0)
return freq;
@@ -1290,7 +1354,7 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ int min_freq, max_freq, nominal_freq, ret;
struct amd_cpudata *cpudata;
struct device *dev;
u64 value;
@@ -1317,13 +1381,18 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = READ_ONCE(cpudata->min_freq);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ if (min_freq <= 0 || max_freq <= 0 ||
+ nominal_freq <= 0 || min_freq > max_freq) {
+ dev_err(dev,
+ "min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect, check _CPC in ACPI tables\n",
+ min_freq, max_freq, nominal_freq);
ret = -EINVAL;
goto free_cpudata1;
}
@@ -1333,12 +1402,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
-
policy->driver_data = cpudata;
cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
@@ -1656,6 +1719,11 @@ static int __init amd_pstate_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
+ quirks = NULL;
+
+ /* check if this machine needs CPPC quirks */
+ dmi_check_system(amd_pstate_quirks_table);
+
switch (cppc_state) {
case AMD_PSTATE_UNDEFINED:
/* Disable on the following configs by default:
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 1a1857b0a6f4..ea8438550b49 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -481,9 +481,12 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct private_data *priv;
+
if (!policy)
return 0;
- struct private_data *priv = policy->driver_data;
+
+ priv = policy->driver_data;
cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 64420d9cfd1e..15f1d41920a3 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -741,10 +741,15 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
+ if (!policy)
+ return -ENODEV;
+
+ cpu_data = policy->driver_data;
+
cpufreq_cpu_put(policy);
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
@@ -822,10 +827,15 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_cpudata *cpu_data;
u64 desired_perf;
int ret;
+ if (!policy)
+ return -ENODEV;
+
+ cpu_data = policy->driver_data;
+
cpufreq_cpu_put(policy);
ret = cppc_get_desired_perf(cpu, &desired_perf);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index b993a498084b..c74dd1e01e0d 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -104,6 +104,9 @@ static const struct of_device_id allowlist[] __initconst = {
*/
static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "allwinner,sun50i-h6", },
+ { .compatible = "allwinner,sun50i-h616", },
+ { .compatible = "allwinner,sun50i-h618", },
+ { .compatible = "allwinner,sun50i-h700", },
{ .compatible = "apple,arm-platform", },
@@ -195,19 +198,18 @@ static const struct of_device_id blocklist[] __initconst = {
static bool __init cpu0_node_has_opp_v2_prop(void)
{
- struct device_node *np = of_cpu_device_node_get(0);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
bool ret = false;
if (of_property_present(np, "operating-points-v2"))
ret = true;
- of_node_put(np);
return ret;
}
static int __init cpufreq_dt_platdev_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
const void *data = NULL;
@@ -223,11 +225,9 @@ static int __init cpufreq_dt_platdev_init(void)
if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
goto create_pdev;
- of_node_put(np);
return -ENODEV;
create_pdev:
- of_node_put(np);
return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
-1, data,
sizeof(struct cpufreq_dt_platform_data)));
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 2d83bbc65dd0..907e22632fda 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -68,12 +68,9 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
*/
static const char *find_supply_name(struct device *dev)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_node_get(dev->of_node);
struct property *pp;
int cpu = dev->id;
- const char *name = NULL;
-
- np = of_node_get(dev->of_node);
/* This must be valid for sure */
if (WARN_ON(!np))
@@ -82,22 +79,16 @@ static const char *find_supply_name(struct device *dev)
/* Try "cpu0" for older DTs */
if (!cpu) {
pp = of_find_property(np, "cpu0-supply", NULL);
- if (pp) {
- name = "cpu0";
- goto node_put;
- }
+ if (pp)
+ return "cpu0";
}
pp = of_find_property(np, "cpu-supply", NULL);
- if (pp) {
- name = "cpu";
- goto node_put;
- }
+ if (pp)
+ return "cpu";
dev_dbg(dev, "no regulator for cpu%d\n", cpu);
-node_put:
- of_node_put(np);
- return name;
+ return NULL;
}
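The __free(device_node) annotations in the two hunks above attach of_node_put() to the variable's scope, so every early return releases the node without an explicit put. A minimal sketch of the same scope-based cleanup:

#include <linux/of.h>

/* of_node_put() runs automatically when 'np' leaves scope. */
static bool example_root_has_compatible(void)
{
	struct device_node *np __free(device_node) =
		of_find_node_by_path("/");

	if (!np)
		return false;

	return of_property_present(np, "compatible");
}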
static int cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 66e10a19d76a..a45aac17c20f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1679,10 +1679,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
*/
if (cpufreq_driver->offline) {
cpufreq_driver->offline(policy);
- } else if (cpufreq_driver->exit) {
- cpufreq_driver->exit(policy);
- policy->freq_table = NULL;
+ return;
}
+
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+ policy->freq_table = NULL;
}
static int cpufreq_offline(unsigned int cpu)
@@ -1740,7 +1743,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
}
/* We did light-weight exit earlier, do full tear down now */
- if (cpufreq_driver->offline)
+ if (cpufreq_driver->offline && cpufreq_driver->exit)
cpufreq_driver->exit(policy);
up_write(&policy->rwsem);
@@ -2582,6 +2585,40 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get_policy);
+DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
+
+/**
+ * cpufreq_update_pressure() - Update cpufreq pressure for CPUs
+ * @policy: cpufreq policy of the CPUs.
+ *
+ * Update the value of cpufreq pressure for all @cpus in the policy.
+ */
+static void cpufreq_update_pressure(struct cpufreq_policy *policy)
+{
+ unsigned long max_capacity, capped_freq, pressure;
+ u32 max_freq;
+ int cpu;
+
+ cpu = cpumask_first(policy->related_cpus);
+ max_freq = arch_scale_freq_ref(cpu);
+ capped_freq = policy->max;
+
+ /*
+ * Handle the boost frequencies properly: they should simply clear
+ * the cpufreq pressure value.
+ */
+ if (max_freq <= capped_freq) {
+ pressure = 0;
+ } else {
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ pressure = max_capacity -
+ mult_frac(max_capacity, capped_freq, max_freq);
+ }
+
+ for_each_cpu(cpu, policy->related_cpus)
+ WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
+}
+
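The pressure formula above subtracts the capped fraction of capacity from the maximum capacity, and boost-range caps produce no pressure at all. A worked userspace example with illustrative numbers (capacity 1024, reference max 3000 MHz, cap 2400 MHz):

#include <stdio.h>

int main(void)
{
	unsigned long max_capacity = 1024;
	unsigned long max_freq = 3000, capped_freq = 2400;
	unsigned long pressure;

	if (max_freq <= capped_freq)
		pressure = 0;	/* cap is in the boost range: no pressure */
	else
		pressure = max_capacity -
			   max_capacity * capped_freq / max_freq;

	printf("pressure = %lu\n", pressure);	/* 1024 - 819 = 205 */
	return 0;
}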
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
* @policy: Policy object to modify.
@@ -2637,6 +2674,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
trace_cpu_frequency_limits(policy);
+ cpufreq_update_pressure(policy);
+
policy->cached_target_freq = UINT_MAX;
pr_debug("new min and max freqs are %u - %u kHz\n",
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c17dc51a5a02..10e80d912b8d 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -70,7 +70,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
- unsigned int freq, next_larger = ~0;
+ unsigned int freq, prev_smaller = 0;
bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
@@ -86,12 +86,12 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
break;
}
- if ((next_larger > freq) && (freq > policy->max))
- next_larger = freq;
+ if ((prev_smaller < freq) && (freq <= policy->max))
+ prev_smaller = freq;
}
if (!found) {
- policy->max = next_larger;
+ policy->max = prev_smaller;
cpufreq_verify_within_cpu_limits(policy);
}
@@ -194,7 +194,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
}
if (optimal.driver_data > i) {
if (suboptimal.driver_data > i) {
- WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+ WARN(1, "Invalid frequency table: %u\n", policy->cpu);
return 0;
}
@@ -254,7 +254,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- count += sprintf(&buf[count], "%d ", pos->frequency);
+ count += sprintf(&buf[count], "%u ", pos->frequency);
}
count += sprintf(&buf[count], "\n");
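The verify change flips the clamping direction: an out-of-table policy->max is now lowered to the largest table frequency at or below it rather than raised to the next larger one. A worked userspace illustration with a made-up {500, 1000, 1500} kHz table and a 1200 kHz limit:

#include <stdio.h>

int main(void)
{
	unsigned int table[] = { 500, 1000, 1500 };
	unsigned int max = 1200, prev_smaller = 0;

	/* Track the largest entry that does not exceed the limit. */
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i] > prev_smaller && table[i] <= max)
			prev_smaller = table[i];

	printf("clamped max = %u kHz\n", prev_smaller);	/* 1000 */
	return 0;
}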
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dbbf299f4219..4b986c044741 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -173,7 +173,6 @@ struct vid_data {
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
- * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -182,7 +181,6 @@ struct vid_data {
struct global_params {
bool no_turbo;
bool turbo_disabled;
- bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -213,7 +211,7 @@ struct global_params {
* @epp_policy: Last saved policy used to set EPP/EPB
* @epp_default: Power on default HWP energy performance
* preference/bias
- * @epp_cached Cached HWP energy-performance preference value
+ * @epp_cached: Cached HWP energy-performance preference value
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
@@ -292,11 +290,11 @@ struct pstate_funcs {
static struct pstate_funcs pstate_funcs __read_mostly;
-static int hwp_active __read_mostly;
-static int hwp_mode_bdw __read_mostly;
-static bool per_cpu_limits __read_mostly;
+static bool hwp_active __ro_after_init;
+static int hwp_mode_bdw __ro_after_init;
+static bool per_cpu_limits __ro_after_init;
+static bool hwp_forced __ro_after_init;
static bool hwp_boost __read_mostly;
-static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -594,12 +592,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
}
-static inline void update_turbo_state(void)
+static bool turbo_is_disabled(void)
{
u64 misc_en;
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
- global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+
+ return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
static int min_perf_pct_min(void)
@@ -1154,12 +1153,15 @@ static void intel_pstate_update_policies(void)
static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
struct cpufreq_policy *policy)
{
- policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ intel_pstate_get_hwp_cap(cpudata);
+
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
refresh_frequency_limits(policy);
}
-static void intel_pstate_update_max_freq(unsigned int cpu)
+static void intel_pstate_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
@@ -1171,25 +1173,12 @@ static void intel_pstate_update_max_freq(unsigned int cpu)
cpufreq_cpu_release(policy);
}
-static void intel_pstate_update_limits(unsigned int cpu)
+static void intel_pstate_update_limits_for_all(void)
{
- mutex_lock(&intel_pstate_driver_lock);
-
- update_turbo_state();
- /*
- * If turbo has been turned on or off globally, policy limits for
- * all CPUs need to be updated to reflect that.
- */
- if (global.turbo_disabled_mf != global.turbo_disabled) {
- global.turbo_disabled_mf = global.turbo_disabled;
- arch_set_max_freq_ratio(global.turbo_disabled);
- for_each_possible_cpu(cpu)
- intel_pstate_update_max_freq(cpu);
- } else {
- cpufreq_update_policy(cpu);
- }
+ int cpu;
- mutex_unlock(&intel_pstate_driver_lock);
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_limits(cpu);
}
/************************** sysfs begin ************************/
@@ -1287,11 +1276,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
return -EAGAIN;
}
- update_turbo_state();
- if (global.turbo_disabled)
- ret = sprintf(buf, "%u\n", global.turbo_disabled);
- else
- ret = sprintf(buf, "%u\n", global.no_turbo);
+ ret = sprintf(buf, "%u\n", global.no_turbo);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1302,32 +1287,34 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
- int ret;
+ bool no_turbo;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
+ if (sscanf(buf, "%u", &input) != 1)
return -EINVAL;
mutex_lock(&intel_pstate_driver_lock);
if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
- return -EAGAIN;
+ count = -EAGAIN;
+ goto unlock_driver;
}
- mutex_lock(&intel_pstate_limits_lock);
+ no_turbo = !!clamp_t(int, input, 0, 1);
+
+ if (no_turbo == global.no_turbo)
+ goto unlock_driver;
- update_turbo_state();
if (global.turbo_disabled) {
pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
- mutex_unlock(&intel_pstate_limits_lock);
- mutex_unlock(&intel_pstate_driver_lock);
- return -EPERM;
+ count = -EPERM;
+ goto unlock_driver;
}
- global.no_turbo = clamp_t(int, input, 0, 1);
+ WRITE_ONCE(global.no_turbo, no_turbo);
- if (global.no_turbo) {
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (no_turbo) {
struct cpudata *cpu = all_cpu_data[0];
int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
@@ -1338,9 +1325,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
mutex_unlock(&intel_pstate_limits_lock);
- intel_pstate_update_policies();
- arch_set_max_freq_ratio(global.no_turbo);
+ intel_pstate_update_limits_for_all();
+ arch_set_max_freq_ratio(no_turbo);
+unlock_driver:
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1621,7 +1609,6 @@ static void intel_pstate_notify_work(struct work_struct *work)
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
if (policy) {
- intel_pstate_get_hwp_cap(cpudata);
__intel_pstate_update_max_freq(cpudata, policy);
cpufreq_cpu_release(policy);
@@ -1636,11 +1623,10 @@ static cpumask_t hwp_intr_enable_mask;
void notify_hwp_interrupt(void)
{
unsigned int this_cpu = smp_processor_id();
- struct cpudata *cpudata;
unsigned long flags;
u64 value;
- if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
rdmsrl_safe(MSR_HWP_STATUS, &value);
@@ -1652,24 +1638,8 @@ void notify_hwp_interrupt(void)
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
goto ack_intr;
- /*
- * Currently we never free all_cpu_data. And we can't reach here
- * without this allocated. But for safety for future changes, added
- * check.
- */
- if (unlikely(!READ_ONCE(all_cpu_data)))
- goto ack_intr;
-
- /*
- * The free is done during cleanup, when cpufreq registry is failed.
- * We wouldn't be here if it fails on init or switch status. But for
- * future changes, added check.
- */
- cpudata = READ_ONCE(all_cpu_data[this_cpu]);
- if (unlikely(!cpudata))
- goto ack_intr;
-
- schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+ schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
+ msecs_to_jiffies(10));
spin_unlock_irqrestore(&hwp_notify_lock, flags);
@@ -1682,7 +1652,7 @@ ack_intr:
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
- unsigned long flags;
+ bool cancel_work;
if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
@@ -1690,22 +1660,22 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- spin_lock_irqsave(&hwp_notify_lock, flags);
- if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
- cancel_delayed_work(&cpudata->hwp_notify_work);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ spin_lock_irq(&hwp_notify_lock);
+ cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+ spin_unlock_irq(&hwp_notify_lock);
+
+ if (cancel_work)
+ cancel_delayed_work_sync(&cpudata->hwp_notify_work);
}
static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
/* Enable HWP notification interrupt for guaranteed performance change */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
- unsigned long flags;
-
- spin_lock_irqsave(&hwp_notify_lock, flags);
+ spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ spin_unlock_irq(&hwp_notify_lock);
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
@@ -1791,7 +1761,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -1956,7 +1926,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
+ if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
val |= (u64)1 << 32;
return val;
@@ -2029,14 +1999,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
-static void intel_pstate_max_within_limits(struct cpudata *cpu)
-{
- int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
-
- update_turbo_state();
- intel_pstate_set_pstate(cpu, pstate);
-}
-
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
@@ -2262,7 +2224,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
sample->busy_scaled = busy_frac * 100;
- target = global.no_turbo || global.turbo_disabled ?
+ target = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
target += target >> 2;
target = mul_fp(target, busy_frac);
@@ -2306,8 +2268,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu)
struct sample *sample;
int target_pstate;
- update_turbo_state();
-
target_pstate = get_target_pstate(cpu);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
@@ -2437,6 +2397,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+#ifdef CONFIG_ACPI
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
@@ -2445,6 +2406,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
{}
};
+#endif
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
X86_MATCH(KABYLAKE, core_funcs),
@@ -2526,7 +2488,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
- return global.turbo_disabled || global.no_turbo ?
+ return READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
@@ -2611,12 +2573,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
+
/*
* NOHZ_FULL CPUs need this as the governor callback may not
* be invoked on them.
*/
intel_pstate_clear_update_util_hook(policy->cpu);
- intel_pstate_max_within_limits(cpu);
+ intel_pstate_set_pstate(cpu, pstate);
} else {
intel_pstate_set_update_util_hook(policy->cpu);
}
@@ -2659,10 +2623,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
{
int max_freq;
- update_turbo_state();
if (hwp_active) {
intel_pstate_get_hwp_cap(cpu);
- max_freq = global.no_turbo || global.turbo_disabled ?
+ max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
} else {
max_freq = intel_pstate_get_max_freq(cpu);
@@ -2756,9 +2719,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_freq;
- update_turbo_state();
- global.turbo_disabled_mf = global.turbo_disabled;
- policy->cpuinfo.max_freq = global.turbo_disabled ?
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
policy->min = policy->cpuinfo.min_freq;
@@ -2923,8 +2884,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
int target_pstate;
- update_turbo_state();
-
freqs.old = policy->cur;
freqs.new = target_freq;
@@ -2946,8 +2905,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
struct cpudata *cpu = all_cpu_data[policy->cpu];
int target_pstate;
- update_turbo_state();
-
target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
@@ -2965,9 +2922,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
- update_turbo_state();
- cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
- HWP_HIGHEST_PERF(hwp_cap);
+ cap_pstate = READ_ONCE(global.no_turbo) ?
+ HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
@@ -3135,10 +3092,8 @@ static void intel_pstate_driver_cleanup(void)
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);
- spin_lock(&hwp_notify_lock);
kfree(all_cpu_data[cpu]);
WRITE_ONCE(all_cpu_data[cpu], NULL);
- spin_unlock(&hwp_notify_lock);
}
}
cpus_read_unlock();
@@ -3155,6 +3110,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
memset(&global, 0, sizeof(global));
global.max_perf_pct = 100;
+ global.turbo_disabled = turbo_is_disabled();
+ global.no_turbo = global.turbo_disabled;
+
+ arch_set_max_freq_ratio(global.turbo_disabled);
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
@@ -3466,7 +3425,7 @@ static int __init intel_pstate_init(void)
* deal with it.
*/
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
- WRITE_ONCE(hwp_active, 1);
+ hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
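Throughout the intel_pstate hunks above, no_turbo becomes a locklessly-read flag: the single writer updates it with WRITE_ONCE() under the driver mutex, and hot-path readers use READ_ONCE() with no lock. A minimal sketch of that single-writer pattern, with illustrative names:

#include <linux/compiler.h>
#include <linux/mutex.h>

static bool example_no_turbo;
static DEFINE_MUTEX(example_lock);

/* Single writer: serialize updates, publish with WRITE_ONCE(). */
static void example_set_no_turbo(bool val)
{
	mutex_lock(&example_lock);
	WRITE_ONCE(example_no_turbo, val);
	mutex_unlock(&example_lock);
}

/* Hot-path reader: no lock, just a marked load. */
static unsigned int example_max_freq(unsigned int max, unsigned int turbo)
{
	return READ_ONCE(example_no_turbo) ? max : turbo;
}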
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index a0a61919bc4c..518606adf14e 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -707,6 +707,15 @@ static const struct mtk_cpufreq_platform_data mt7623_platform_data = {
.ccifreq_supported = false,
};
+static const struct mtk_cpufreq_platform_data mt7988_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 900000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1150000,
+ .ccifreq_supported = true,
+};
+
static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
@@ -740,6 +749,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
{ .compatible = "mediatek,mt7623", .data = &mt7623_platform_data },
+ { .compatible = "mediatek,mt7988a", .data = &mt7988_platform_data },
{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 70b0f21968a0..ec8df5496a0c 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -347,8 +347,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
throttled_freq = freq_hz / HZ_PER_KHZ;
- /* Update thermal pressure (the boost frequencies are accepted) */
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+ /* Update HW pressure (the boost frequencies are accepted) */
+ arch_update_hw_pressure(policy->related_cpus, throttled_freq);
/*
* In the unlikely case policy is unregistered do not enable
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 32a9c88f8ff6..0b882765cd66 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
@@ -18,26 +19,155 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define MAX_NAME_LEN 7
-
#define NVMEM_MASK 0x7
#define NVMEM_SHIFT 5
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
+struct sunxi_cpufreq_data {
+ u32 (*efuse_xlate)(u32 speedbin);
+};
+
+static u32 sun50i_h6_efuse_xlate(u32 speedbin)
+{
+ u32 efuse_value;
+
+ efuse_value = (speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
+
+ /*
+ * We treat unexpected efuse values as if the SoC were from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
+ if (efuse_value >= 1 && efuse_value <= 3)
+ return efuse_value - 1;
+ else
+ return 0;
+}
+
+static int get_soc_id_revision(void)
+{
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+ return arm_smccc_get_soc_id_revision();
+#else
+ return SMCCC_RET_NOT_SUPPORTED;
+#endif
+}
+
+/*
+ * Judging by the OPP tables in the vendor BSP, the quality order of the
+ * returned speedbin index is 4 -> 0/2 -> 3 -> 1, from worst to best.
+ * 0 and 2 seem identical from the OPP tables' point of view.
+ */
+static u32 sun50i_h616_efuse_xlate(u32 speedbin)
+{
+ int ver_bits = get_soc_id_revision();
+ u32 value = 0;
+
+ switch (speedbin & 0xffff) {
+ case 0x2000:
+ value = 0;
+ break;
+ case 0x2400:
+ case 0x7400:
+ case 0x2c00:
+ case 0x7c00:
+ if (ver_bits != SMCCC_RET_NOT_SUPPORTED && ver_bits <= 1) {
+ /* ic version A/B */
+ value = 1;
+ } else {
+ /* ic version C and later version */
+ value = 2;
+ }
+ break;
+ case 0x5000:
+ case 0x5400:
+ case 0x6000:
+ value = 3;
+ break;
+ case 0x5c00:
+ value = 4;
+ break;
+ case 0x5d00:
+ value = 0;
+ break;
+ default:
+ pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n",
+ speedbin & 0xffff);
+ value = 0;
+ break;
+ }
+
+ return value;
+}
+
+static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = {
+ .efuse_xlate = sun50i_h6_efuse_xlate,
+};
+
+static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = {
+ .efuse_xlate = sun50i_h616_efuse_xlate,
+};
+
+static const struct of_device_id cpu_opp_match_list[] = {
+ { .compatible = "allwinner,sun50i-h6-operating-points",
+ .data = &sun50i_h6_cpufreq_data,
+ },
+ { .compatible = "allwinner,sun50i-h616-operating-points",
+ .data = &sun50i_h616_cpufreq_data,
+ },
+ {}
+};
+
+/**
+ * dt_has_supported_hw() - Check if any OPPs use opp-supported-hw
+ *
+ * If we ask the cpufreq framework to use the opp-supported-hw feature, it
+ * will ignore every OPP node without that DT property. If none of the OPPs
+ * have it, the driver will fail probing due to the lack of OPPs.
+ *
+ * Returns true if we have at least one OPP with the opp-supported-hw property.
+ */
+static bool dt_has_supported_hw(void)
+{
+ bool has_opp_supported_hw = false;
+ struct device_node *np, *opp;
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return false;
+
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!np)
+ return false;
+
+ for_each_child_of_node(np, opp) {
+ if (of_find_property(opp, "opp-supported-hw", NULL)) {
+ has_opp_supported_hw = true;
+ break;
+ }
+ }
+
+ of_node_put(np);
+
+ return has_opp_supported_hw;
+}
+
/**
* sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
- * @versions: Set to the value parsed from efuse
*
- * Returns 0 if success.
+ * Returns a non-negative speed bin index on success, or a negative
+ * error value otherwise.
*/
-static int sun50i_cpufreq_get_efuse(u32 *versions)
+static int sun50i_cpufreq_get_efuse(void)
{
+ const struct sunxi_cpufreq_data *opp_data;
struct nvmem_cell *speedbin_nvmem;
+ const struct of_device_id *match;
struct device_node *np;
struct device *cpu_dev;
- u32 *speedbin, efuse_value;
- size_t len;
+ u32 *speedbin;
int ret;
cpu_dev = get_cpu_device(0);
@@ -48,12 +178,12 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np,
- "allwinner,sun50i-h6-operating-points");
- if (!ret) {
+ match = of_match_node(cpu_opp_match_list, np);
+ if (!match) {
of_node_put(np);
return -ENOENT;
}
+ opp_data = match->data;
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
of_node_put(np);
@@ -61,33 +191,25 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
- speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
nvmem_cell_put(speedbin_nvmem);
if (IS_ERR(speedbin))
return PTR_ERR(speedbin);
- efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
-
- /*
- * We treat unexpected efuse values as if the SoC was from
- * the slowest bin. Expected efuse values are 1-3, slowest
- * to fastest.
- */
- if (efuse_value >= 1 && efuse_value <= 3)
- *versions = efuse_value - 1;
- else
- *versions = 0;
+ ret = opp_data->efuse_xlate(*speedbin);
kfree(speedbin);
- return 0;
+
+ return ret;
};
static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
int *opp_tokens;
- char name[MAX_NAME_LEN];
- unsigned int cpu;
- u32 speed = 0;
+ char name[] = "speedXXXXXXXXXXX"; /* Integers can take 11 chars max */
+ unsigned int cpu, supported_hw;
+ struct dev_pm_opp_config config = {};
+ int speed;
int ret;
opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
@@ -95,13 +217,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
if (!opp_tokens)
return -ENOMEM;
- ret = sun50i_cpufreq_get_efuse(&speed);
- if (ret) {
+ speed = sun50i_cpufreq_get_efuse();
+ if (speed < 0) {
kfree(opp_tokens);
- return ret;
+ return speed;
}
- snprintf(name, MAX_NAME_LEN, "speed%d", speed);
+ /*
+ * We need at least one OPP with the "opp-supported-hw" property,
+ * or else the upper layers will ignore every OPP and will bail out.
+ */
+ if (dt_has_supported_hw()) {
+ supported_hw = 1U << speed;
+ config.supported_hw = &supported_hw;
+ config.supported_hw_count = 1;
+ }
+
+ snprintf(name, sizeof(name), "speed%d", speed);
+ config.prop_name = name;
for_each_possible_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
@@ -111,12 +244,11 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
goto free_opp;
}
- opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
- if (opp_tokens[cpu] < 0) {
- ret = opp_tokens[cpu];
- pr_err("Failed to set prop name\n");
+ ret = dev_pm_opp_set_config(cpu_dev, &config);
+ if (ret < 0)
goto free_opp;
- }
+
+ opp_tokens[cpu] = ret;
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
@@ -131,7 +263,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
free_opp:
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
return ret;
@@ -145,7 +277,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
}
@@ -160,6 +292,9 @@ static struct platform_driver sun50i_cpufreq_driver = {
static const struct of_device_id sun50i_cpufreq_match_list[] = {
{ .compatible = "allwinner,sun50i-h6" },
+ { .compatible = "allwinner,sun50i-h616" },
+ { .compatible = "allwinner,sun50i-h618" },
+ { .compatible = "allwinner,sun50i-h700" },
{}
};
MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
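For context on the mechanism above: each SoC supplies an efuse_xlate callback that maps the raw NVMEM speed-bin field to a small index, and probe turns that index into both the "speedN" property name and an opp-supported-hw bit. A minimal sketch of such a translator, with purely illustrative fuse values (not the real H616 fuse map):

static u32 example_efuse_xlate(u32 speedbin)
{
	switch (speedbin & 0xffff) {
	case 0x1000:	/* hypothetical fuse code for bin 0 */
		return 0;
	case 0x1400:	/* hypothetical fuse code for bin 1 */
		return 1;
	default:
		pr_warn("unknown speed bin 0x%x, using default bin 0\n",
			speedbin & 0xffff);
		return 0;
	}
}

With speed = N, the probe path then sets config.prop_name = "speedN" and, when the DT opts in, supported_hw = BIT(N), so only OPP nodes whose opp-supported-hw mask contains that bit are registered.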
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index aae951d4e77c..514146d98bca 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -52,12 +52,15 @@ out:
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
struct tegra124_cpufreq_priv *priv;
- struct device_node *np;
struct device *cpu_dev;
struct platform_device_info cpufreq_dt_devinfo = {};
int ret;
+ if (!np)
+ return -ENODEV;
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -66,15 +69,9 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (!cpu_dev)
return -ENODEV;
- np = of_cpu_device_node_get(0);
- if (!np)
- return -ENODEV;
-
priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
- if (IS_ERR(priv->cpu_clk)) {
- ret = PTR_ERR(priv->cpu_clk);
- goto out_put_np;
- }
+ if (IS_ERR(priv->cpu_clk))
+ return PTR_ERR(priv->cpu_clk);
priv->dfll_clk = of_clk_get_by_name(np, "dfll");
if (IS_ERR(priv->dfll_clk)) {
@@ -110,8 +107,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- of_node_put(np);
-
return 0;
out_put_pllp_clk:
@@ -122,8 +117,6 @@ out_put_dfll_clk:
clk_put(priv->dfll_clk);
out_put_cpu_clk:
clk_put(priv->cpu_clk);
-out_put_np:
- of_node_put(np);
return ret;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 46c41e2ca727..714ed53753fa 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -347,12 +347,10 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
static const struct of_device_id *ti_cpufreq_match_node(void)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
- np = of_find_node_by_path("/");
match = of_match_node(ti_cpufreq_of_match, np);
- of_node_put(np);
return match;
}
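The __free(device_node) annotation used in both conversions above comes from <linux/cleanup.h>: of.h defines a device_node cleanup class whose destructor is of_node_put(), so the reference is dropped automatically on every return path, which is what lets the explicit of_node_put() calls and the out_put_np label disappear. A minimal sketch of the idiom:

#include <linux/cleanup.h>
#include <linux/of.h>

static int example_read_root_cells(u32 *out)
{
	/* of_node_put(np) runs automatically when np goes out of scope */
	struct device_node *np __free(device_node) =
		of_find_node_by_path("/");

	if (!np)
		return -ENODEV;

	return of_property_read_u32(np, "#address-cells", out);
}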
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 9acde71558d5..bb8761c8a42e 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -439,13 +439,8 @@ static int cpuidle_coupled_clear_pokes(int cpu)
static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
- cpumask_t cpus;
- int ret;
-
- cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
- ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
-
- return ret;
+ return cpumask_first_and_and(cpu_online_mask, &coupled->coupled_cpus,
+ &cpuidle_coupled_poke_pending) < nr_cpu_ids;
}
/**
@@ -626,9 +621,7 @@ out:
static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
- cpumask_t cpus;
- cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
- coupled->online_count = cpumask_weight(&cpus);
+ coupled->online_count = cpumask_weight_and(cpu_online_mask, &coupled->coupled_cpus);
}
/**
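Both coupled.c hunks replace an on-stack cpumask_t temporary with helpers that fold the AND into the traversal; with large NR_CPUS a cpumask_t can run to a kilobyte, which is unwelcome on an idle-path stack. A sketch of the two helpers, wrapped in hypothetical function names:

#include <linux/cpumask.h>

/* How many CPUs are both online and in @set? No temporary mask needed. */
static unsigned int online_count_in(const struct cpumask *set)
{
	return cpumask_weight_and(cpu_online_mask, set);
}

/* Any CPU in (online & @set & @pending)? Result is >= nr_cpu_ids if none. */
static bool any_online_pending(const struct cpumask *set,
			       const struct cpumask *pending)
{
	return cpumask_first_and_and(cpu_online_mask, set, pending) <
	       nr_cpu_ids;
}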
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 13bf743f885b..602c4dfdd7e2 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -59,15 +59,14 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
return cpuidle_register(&kirkwood_idle_driver, NULL);
}
-static int kirkwood_cpuidle_remove(struct platform_device *pdev)
+static void kirkwood_cpuidle_remove(struct platform_device *pdev)
{
cpuidle_unregister(&kirkwood_idle_driver);
- return 0;
}
static struct platform_driver kirkwood_cpuidle_driver = {
.probe = kirkwood_cpuidle_probe,
- .remove = kirkwood_cpuidle_remove,
+ .remove_new = kirkwood_cpuidle_remove,
.driver = {
.name = "kirkwood_cpuidle",
},
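The .remove_new conversion follows the tree-wide switch to a void-returning platform remove callback: the int returned by legacy .remove was ignored by the driver core anyway, so the new prototype makes that contract explicit. The converted shape, as a self-contained sketch:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* tear down unconditionally; there is nothing useful to return */
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove_new	= example_remove,	/* void, unlike legacy .remove */
	.driver		= {
		.name	= "example",
	},
};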
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 8e9058c4ea63..6617eb494a11 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -44,6 +44,7 @@ static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
/**
* ladder_do_selection - prepares private data for a state change
+ * @dev: the CPU
* @ldev: the ladder device
* @old_idx: the current state index
* @new_idx: the new target state index
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3d02702456a5..94f23c6fc93b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -67,6 +67,7 @@ config CRYPTO_DEV_GEODE
config ZCRYPT
tristate "Support for s390 cryptographic adapters"
depends on S390
+ depends on AP
select HW_RANDOM
help
Select this option if you want to enable support for
@@ -74,23 +75,6 @@ config ZCRYPT
to 8 in Coprocessor (CEXxC), EP11 Coprocessor (CEXxP)
or Accelerator (CEXxA) mode.
-config ZCRYPT_DEBUG
- bool "Enable debug features for s390 cryptographic adapters"
- default n
- depends on DEBUG_KERNEL
- depends on ZCRYPT
- help
- Say 'Y' here to enable some additional debug features on the
- s390 cryptographic adapters driver.
-
- There will be some more sysfs attributes displayed for ap cards
- and queues and some flags on crypto requests are interpreted as
- debugging messages to force error injection.
-
- Do not enable on production level kernel build.
-
- If unsure, say N.
-
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
@@ -660,6 +644,14 @@ config CRYPTO_DEV_ROCKCHIP_DEBUG
This will create /sys/kernel/debug/rk3288_crypto/stats for displaying
the number of requests per algorithm and other internal stats.
+config CRYPTO_DEV_TEGRA
+ tristate "Enable Tegra Security Engine"
+ depends on TEGRA_HOST1X
+ select CRYPTO_ENGINE
+ help
+ Select this to enable the Tegra Security Engine, which accelerates
+ various AES encryption/decryption and hash algorithms.
config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 95331bc6456b..ad4ccef67d12 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/
obj-y += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
+obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index 83a9093eff25..a895e4289efa 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -51,7 +51,7 @@ static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd)
*__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len)));
}
-void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
+void atmel_i2c_init_read_config_cmd(struct atmel_i2c_cmd *cmd)
{
cmd->word_addr = COMMAND;
cmd->opcode = OPCODE_READ;
@@ -68,7 +68,31 @@ void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
cmd->msecs = MAX_EXEC_TIME_READ;
cmd->rxsize = READ_RSP_SIZE;
}
-EXPORT_SYMBOL(atmel_i2c_init_read_cmd);
+EXPORT_SYMBOL(atmel_i2c_init_read_config_cmd);
+
+int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr)
+{
+ if (addr >= OTP_ZONE_SIZE)
+ return -EINVAL;
+
+ cmd->word_addr = COMMAND;
+ cmd->opcode = OPCODE_READ;
+ /*
+ * Read a 4-byte word from the OTP zone, which may hold e.g. serial
+ * numbers if the zone has been pre-initialized and locked.
+ */
+ cmd->param1 = OTP_ZONE;
+ cmd->param2 = cpu_to_le16(addr);
+ cmd->count = READ_COUNT;
+
+ atmel_i2c_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_READ;
+ cmd->rxsize = READ_RSP_SIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(atmel_i2c_init_read_otp_cmd);
void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd)
{
@@ -301,7 +325,7 @@ static int device_sanity_check(struct i2c_client *client)
if (!cmd)
return -ENOMEM;
- atmel_i2c_init_read_cmd(cmd);
+ atmel_i2c_init_read_config_cmd(cmd);
ret = atmel_i2c_send_receive(client, cmd);
if (ret)
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
index c0bd429ee2c7..72f04c15682f 100644
--- a/drivers/crypto/atmel-i2c.h
+++ b/drivers/crypto/atmel-i2c.h
@@ -64,6 +64,10 @@ struct atmel_i2c_cmd {
/* Definitions for eeprom organization */
#define CONFIGURATION_ZONE 0
+#define OTP_ZONE 1
+
+/* Definitions for eeprom zone sizes */
+#define OTP_ZONE_SIZE 64
/* Definitions for Indexes common to all commands */
#define RSP_DATA_IDX 1 /* buffer index of data in response */
@@ -124,6 +128,7 @@ struct atmel_ecc_driver_data {
* @wake_token : wake token array of zeros
* @wake_token_sz : size in bytes of the wake_token
* @tfm_count : number of active crypto transformations on i2c client
+ * @hwrng : hardware random number generator instance
*
* Reads and writes from/to the i2c client are sequential. The first byte
* transmitted to the device is treated as the byte size. Any attempt to send
@@ -177,7 +182,8 @@ void atmel_i2c_flush_queue(void);
int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd);
-void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd);
+void atmel_i2c_init_read_config_cmd(struct atmel_i2c_cmd *cmd);
+int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr);
void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd);
void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid);
int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index c77f482d2a97..24ffdf505023 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -91,6 +91,62 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
return max;
}
+static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp)
+{
+ struct atmel_i2c_cmd cmd;
+ int ret;
+
+ if (atmel_i2c_init_read_otp_cmd(&cmd, addr) < 0) {
+ dev_err(&client->dev, "failed, invalid otp address %04X\n",
+ addr);
+ return -EINVAL;
+ }
+
+ ret = atmel_i2c_send_receive(client, &cmd);
+ if (ret)
+ return ret;
+
+ if (cmd.data[0] == 0xff) {
+ dev_err(&client->dev, "failed, device not ready\n");
+ return -EBUSY;
+ }
+
+ memcpy(otp, cmd.data + 1, 4);
+
+ return 0;
+}
+
+static ssize_t otp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u16 addr;
+ u8 otp[OTP_ZONE_SIZE];
+ char *str = buf;
+ struct i2c_client *client = to_i2c_client(dev);
+ int i;
+
+ for (addr = 0; addr < OTP_ZONE_SIZE / 4; addr++) {
+ if (atmel_sha204a_otp_read(client, addr, otp + addr * 4) < 0) {
+ dev_err(dev, "failed to read otp zone\n");
+ break;
+ }
+ }
+
+ /* print only the bytes actually read: 4 per word address */
+ for (i = 0; i < addr * 4; i++)
+ str += sprintf(str, "%02X", otp[i]);
+ str += sprintf(str, "\n");
+ return str - buf;
+}
+static DEVICE_ATTR_RO(otp);
+
+static struct attribute *atmel_sha204a_attrs[] = {
+ &dev_attr_otp.attr,
+ NULL
+};
+
+static const struct attribute_group atmel_sha204a_groups = {
+ .name = "atsha204a",
+ .attrs = atmel_sha204a_attrs,
+};
+
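From userspace the new attribute reads back the OTP zone as one hex string. A small sketch; the sysfs path below is illustrative and depends on the adapter number and chip address:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	/* example path: bus 0, slave address 0x64 (adjust for your board) */
	int fd = open("/sys/bus/i2c/devices/0-0064/atsha204a/otp", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* hex dump of the OTP contents */
	}
	close(fd);
	return 0;
}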
static int atmel_sha204a_probe(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv;
@@ -111,6 +167,16 @@ static int atmel_sha204a_probe(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
+ /* otp read out */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = sysfs_create_group(&client->dev.kobj, &atmel_sha204a_groups);
+ if (ret) {
+ dev_err(&client->dev, "failed to register sysfs entry\n");
+ return ret;
+ }
+
return ret;
}
@@ -123,6 +189,8 @@ static void atmel_sha204a_remove(struct i2c_client *client)
return;
}
+ sysfs_remove_group(&client->dev.kobj, &atmel_sha204a_groups);
+
kfree((void *)i2c_priv->hwrng.priv);
}
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 07989bb8c220..3fdc64b5a65e 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -495,7 +495,7 @@ static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
if (hash_iv_len) {
packet_log(" Hash IV Length %u bytes\n", hash_iv_len);
packet_dump(" hash IV: ", ptr, hash_iv_len);
- ptr += ciph_key_len;
+ ptr += hash_iv_len;
}
if (ciph_iv_len) {
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index bdf367f3f679..bd418dea586d 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -512,6 +512,7 @@ static const struct of_device_id caam_match[] = {
MODULE_DEVICE_TABLE(of, caam_match);
struct caam_imx_data {
+ bool page0_access;
const struct clk_bulk_data *clks;
int num_clks;
};
@@ -524,6 +525,7 @@ static const struct clk_bulk_data caam_imx6_clks[] = {
};
static const struct caam_imx_data caam_imx6_data = {
+ .page0_access = true,
.clks = caam_imx6_clks,
.num_clks = ARRAY_SIZE(caam_imx6_clks),
};
@@ -534,6 +536,7 @@ static const struct clk_bulk_data caam_imx7_clks[] = {
};
static const struct caam_imx_data caam_imx7_data = {
+ .page0_access = true,
.clks = caam_imx7_clks,
.num_clks = ARRAY_SIZE(caam_imx7_clks),
};
@@ -545,6 +548,7 @@ static const struct clk_bulk_data caam_imx6ul_clks[] = {
};
static const struct caam_imx_data caam_imx6ul_data = {
+ .page0_access = true,
.clks = caam_imx6ul_clks,
.num_clks = ARRAY_SIZE(caam_imx6ul_clks),
};
@@ -554,15 +558,19 @@ static const struct clk_bulk_data caam_vf610_clks[] = {
};
static const struct caam_imx_data caam_vf610_data = {
+ .page0_access = true,
.clks = caam_vf610_clks,
.num_clks = ARRAY_SIZE(caam_vf610_clks),
};
+static const struct caam_imx_data caam_imx8ulp_data;
+
static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
+ { .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
@@ -860,6 +868,7 @@ static int caam_probe(struct platform_device *pdev)
int pg_size;
int BLOCK_OFFSET = 0;
bool reg_access = true;
+ const struct caam_imx_data *imx_soc_data;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
if (!ctrlpriv)
@@ -894,12 +903,20 @@ static int caam_probe(struct platform_device *pdev)
return -EINVAL;
}
+ imx_soc_data = imx_soc_match->data;
+ reg_access = reg_access && imx_soc_data->page0_access;
+ /*
+ * On this SoC the CAAM clocks cannot be controlled from the
+ * kernel, so skip clock initialization.
+ */
+ if (!imx_soc_data->num_clks)
+ goto iomap_ctrl;
+
ret = init_clocks(dev, imx_soc_match->data);
if (ret)
return ret;
}
-
+iomap_ctrl:
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = devm_of_iomap(dev, nprop, 0, NULL);
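The all-zero caam_imx8ulp_data doubles as a capability descriptor: num_clks == 0 signals that the CAAM clocks are managed by firmware on that SoC, so probe skips clock setup entirely, and page0_access staying false keeps register page 0 off limits. The sentinel pattern in miniature, with hypothetical names:

struct soc_caps {
	bool page0_access;
	int num_clks;
};

/* zero-initialized: no kernel-controlled clocks, no page-0 access */
static const struct soc_caps example_imx8ulp_caps;

static int example_init_clocks(const struct soc_caps *caps)
{
	if (!caps->num_clks)
		return 0;	/* firmware owns the clocks; nothing to do */
	/* ... enable caps->num_clks clocks here ... */
	return 0;
}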
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 473301237760..ff6ceb4feee0 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -39,44 +39,38 @@ static const struct sp_dev_vdata dev_vdata[] = {
},
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id sp_acpi_match[] = {
{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
{ },
};
MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id sp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a",
.data = (const void *)&dev_vdata[0] },
{ },
};
MODULE_DEVICE_TABLE(of, sp_of_match);
-#endif
static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
{
-#ifdef CONFIG_OF
const struct of_device_id *match;
match = of_match_node(sp_of_match, pdev->dev.of_node);
if (match && match->data)
return (struct sp_dev_vdata *)match->data;
-#endif
+
return NULL;
}
static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
{
-#ifdef CONFIG_ACPI
const struct acpi_device_id *match;
match = acpi_match_device(sp_acpi_match, &pdev->dev);
if (match && match->driver_data)
return (struct sp_dev_vdata *)match->driver_data;
-#endif
+
return NULL;
}
@@ -212,12 +206,8 @@ static int sp_platform_resume(struct platform_device *pdev)
static struct platform_driver sp_platform_driver = {
.driver = {
.name = "ccp",
-#ifdef CONFIG_ACPI
.acpi_match_table = sp_acpi_match,
-#endif
-#ifdef CONFIG_OF
.of_match_table = sp_of_match,
-#endif
},
.probe = sp_platform_probe,
.remove_new = sp_platform_remove,
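Dropping the #ifdef guards is safe because both match tables compile regardless of CONFIG_OF/CONFIG_ACPI, and the lookup helpers degrade gracefully: of_match_node() returns NULL for a device without an OF node, as does acpi_match_device() without an ACPI companion. The resulting pattern, sketched against this file's tables:

#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const void *example_get_match_data(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_node(sp_of_match, pdev->dev.of_node);
	const struct acpi_device_id *acpi_id =
		acpi_match_device(sp_acpi_match, &pdev->dev);

	if (of_id && of_id->data)
		return of_id->data;
	if (acpi_id && acpi_id->driver_data)
		return (const void *)acpi_id->driver_data;
	return NULL;	/* neither firmware interface matched */
}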
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index cd67fa348ca7..1b9b7bccdeff 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -13,6 +13,7 @@
#define QM_DFX_COMMON_LEN 0xC3
#define QM_DFX_REGS_LEN 4UL
#define QM_DBG_TMP_BUF_LEN 22
+#define QM_XQC_ADDR_MASK GENMASK(31, 0)
#define CURRENT_FUN_MASK GENMASK(5, 0)
#define CURRENT_Q_MASK GENMASK(31, 16)
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
@@ -167,7 +168,6 @@ static void dump_show(struct hisi_qm *qm, void *info,
static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
{
struct device *dev = &qm->pdev->dev;
- struct qm_sqc *sqc_curr;
struct qm_sqc sqc;
u32 qp_id;
int ret;
@@ -183,6 +183,8 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
if (!ret) {
+ sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &sqc, sizeof(struct qm_sqc), name);
return 0;
@@ -190,9 +192,10 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->sqc) {
- sqc_curr = qm->sqc + qp_id;
-
- dump_show(qm, sqc_curr, sizeof(*sqc_curr), "SOFT SQC");
+ memcpy(&sqc, qm->sqc + qp_id, sizeof(struct qm_sqc));
+ sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
}
up_read(&qm->qps_lock);
@@ -202,7 +205,6 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
{
struct device *dev = &qm->pdev->dev;
- struct qm_cqc *cqc_curr;
struct qm_cqc cqc;
u32 qp_id;
int ret;
@@ -218,6 +220,8 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
if (!ret) {
+ cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &cqc, sizeof(struct qm_cqc), name);
return 0;
@@ -225,9 +229,10 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->cqc) {
- cqc_curr = qm->cqc + qp_id;
-
- dump_show(qm, cqc_curr, sizeof(*cqc_curr), "SOFT CQC");
+ memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc));
+ cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");
}
up_read(&qm->qps_lock);
@@ -263,6 +268,10 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
if (ret)
return ret;
+ aeqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ aeqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
+ eqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
+ eqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, xeqc, size, name);
return ret;
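Overwriting base_h/base_l with QM_XQC_ADDR_MASK before dump_show() scrubs the queue-context DMA addresses from the debugfs output so kernel addresses are not leaked; everything else in the copied context is still dumped verbatim. The idiom in isolation (field names taken from this patch, struct trimmed to a sketch):

#include <linux/bits.h>
#include <linux/types.h>

#define XQC_ADDR_MASK	GENMASK(31, 0)	/* 0xffffffff */

struct xqc_ctx {
	__le32 base_l;
	__le32 base_h;
	/* ... remaining context fields are dumped as-is ... */
};

static void scrub_dma_addr(struct xqc_ctx *ctx)
{
	ctx->base_l = cpu_to_le32(XQC_ADDR_MASK);
	ctx->base_h = cpu_to_le32(XQC_ADDR_MASK);
}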
@@ -310,27 +319,26 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
{
- u16 sq_depth = qm->qp_array->cq_depth;
- void *sqe, *sqe_curr;
+ u16 sq_depth = qm->qp_array->sq_depth;
struct hisi_qp *qp;
u32 qp_id, sqe_id;
+ void *sqe;
int ret;
ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
if (ret)
return ret;
- sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
+ sqe = kzalloc(qm->sqe_size, GFP_KERNEL);
if (!sqe)
return -ENOMEM;
qp = &qm->qp_array[qp_id];
- memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
- sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
- memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
+ memcpy(sqe, qp->sqe + sqe_id * qm->sqe_size, qm->sqe_size);
+ memset(sqe + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
qm->debug.sqe_mask_len);
- dump_show(qm, sqe_curr, qm->sqe_size, name);
+ dump_show(qm, sqe, qm->sqe_size, name);
kfree(sqe);
@@ -809,8 +817,14 @@ static void dfx_regs_uninit(struct hisi_qm *qm,
{
int i;
+ if (!dregs)
+ return;
+
/* Setting the pointer is NULL to prevent double free */
for (i = 0; i < reg_len; i++) {
+ if (!dregs[i].regs)
+ continue;
+
kfree(dregs[i].regs);
dregs[i].regs = NULL;
}
@@ -860,14 +874,21 @@ alloc_error:
static int qm_diff_regs_init(struct hisi_qm *qm,
struct dfx_diff_registers *dregs, u32 reg_len)
{
+ int ret;
+
qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
- if (IS_ERR(qm->debug.qm_diff_regs))
- return PTR_ERR(qm->debug.qm_diff_regs);
+ if (IS_ERR(qm->debug.qm_diff_regs)) {
+ ret = PTR_ERR(qm->debug.qm_diff_regs);
+ qm->debug.qm_diff_regs = NULL;
+ return ret;
+ }
qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
if (IS_ERR(qm->debug.acc_diff_regs)) {
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
- return PTR_ERR(qm->debug.acc_diff_regs);
+ ret = PTR_ERR(qm->debug.acc_diff_regs);
+ qm->debug.acc_diff_regs = NULL;
+ return ret;
}
return 0;
@@ -908,7 +929,9 @@ static int qm_last_regs_init(struct hisi_qm *qm)
static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
{
dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
+ qm->debug.acc_diff_regs = NULL;
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+ qm->debug.qm_diff_regs = NULL;
}
/**
@@ -1075,12 +1098,12 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
{
struct debugfs_file *file = qm->debug.files + index;
- debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
- &qm_debug_fops);
-
file->index = index;
mutex_init(&file->lock);
file->debug = &qm->debug;
+
+ debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
+ &qm_debug_fops);
}
static int qm_debugfs_atomic64_set(void *data, u64 val)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index d93aa6630a57..10aa4da93323 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -106,7 +106,7 @@
#define HPRE_SHAPER_TYPE_RATE 640
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
-#define HPRE_SQE_MASK_LEN 24
+#define HPRE_SQE_MASK_LEN 44
#define HPRE_CTX_Q_NUM_DEF 1
#define HPRE_DFX_BASE 0x301000
@@ -1074,41 +1074,40 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
- hpre_debugfs_root);
-
- qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
- qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init HPRE diff regs!\n");
- goto debugfs_remove;
+ return ret;
}
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hpre_debugfs_root);
+ qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
+
hisi_qm_debug_init(qm);
if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
ret = hpre_ctrl_debug_init(qm);
if (ret)
- goto failed_to_create;
+ goto debugfs_remove;
}
hpre_dfx_debug_init(qm);
return 0;
-failed_to_create:
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
debugfs_remove:
debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
return ret;
}
static void hpre_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
-
debugfs_remove_recursive(qm->debug.debug_root);
+
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
}
static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
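hpre, sec and zip all receive the same reshuffle: allocate the diff-regs state first (the only early step that can fail), create the debugfs directory afterwards, and unwind in exact reverse order on both the error path and in exit(). A schematic sketch; regs, make_entries() and the names are placeholders, not real API:

static int example_debugfs_init(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_regs_debugfs_init(qm, regs, ARRAY_SIZE(regs)); /* A */
	if (ret)
		return ret;		/* nothing created yet, just return */

	qm->debug.debug_root = debugfs_create_dir("example", NULL);  /* B */

	ret = make_entries(qm);		/* later step that may fail */
	if (ret)
		goto remove;
	return 0;

remove:
	debugfs_remove_recursive(qm->debug.debug_root);		/* undo B */
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(regs));	/* undo A */
	return ret;
}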
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 92f0a1d9b4a6..3dac8d8e8568 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -645,6 +645,9 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
tmp_xqc = qm->xqc_buf.aeqc;
xqc_dma = qm->xqc_buf.aeqc_dma;
break;
+ default:
+ dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd);
+ return -EINVAL;
}
/* Setting xqc will fail if master OOO is blocked. */
@@ -2893,12 +2896,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
+ qm_remove_uacce(qm);
qm_irqs_unregister(qm);
hisi_qm_pci_uninit(qm);
- if (qm->use_sva) {
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
- }
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 93a972fcbf63..0558f98e221f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -481,8 +481,10 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
if (ctx->pbuf_supported)
sec_free_pbuf_resource(dev, qp_ctx->res);
- if (ctx->alg_type == SEC_AEAD)
+ if (ctx->alg_type == SEC_AEAD) {
sec_free_mac_resource(dev, qp_ctx->res);
+ sec_free_aiv_resource(dev, qp_ctx->res);
+ }
}
static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index c290d8937b19..75aad04ffe5e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -99,8 +99,8 @@
#define SEC_DBGFS_VAL_MAX_LEN 20
#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
-#define SEC_SQE_MASK_OFFSET 64
-#define SEC_SQE_MASK_LEN 48
+#define SEC_SQE_MASK_OFFSET 16
+#define SEC_SQE_MASK_LEN 108
#define SEC_SHAPER_TYPE_RATE 400
#define SEC_DFX_BASE 0x301000
@@ -152,7 +152,7 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
- {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+ {SEC_CORE_ENABLE_BITMAP, 0x3140, 0, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
{SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
@@ -901,37 +901,36 @@ static int sec_debugfs_init(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
- sec_debugfs_root);
- qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
- qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
-
ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init SEC diff regs!\n");
- goto debugfs_remove;
+ return ret;
}
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
+ qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
+
hisi_qm_debug_init(qm);
ret = sec_debug_init(qm);
if (ret)
- goto failed_to_create;
+ goto debugfs_remove;
return 0;
-failed_to_create:
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
debugfs_remove:
- debugfs_remove_recursive(sec_debugfs_root);
+ debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
return ret;
}
static void sec_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
-
debugfs_remove_recursive(qm->debug.debug_root);
+
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
}
static int sec_show_last_regs_init(struct hisi_qm *qm)
@@ -1324,7 +1323,8 @@ static struct pci_driver sec_pci_driver = {
.probe = sec_probe,
.remove = sec_remove,
.err_handler = &sec_err_handler,
- .sriov_configure = hisi_qm_sriov_configure,
+ .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
+ hisi_qm_sriov_configure : NULL,
.shutdown = hisi_qm_dev_shutdown,
.driver.pm = &sec_pm_ops,
};
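IS_ENABLED(CONFIG_PCI_IOV) evaluates to a compile-time 0 or 1, so the ternary statically selects the real handler or NULL with no #ifdef; with the option off, the constant-folded initializer leaves no live reference to the callback. Sketched:

#include <linux/kconfig.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver = {
	.name = "example",
	/* resolves at compile time; no preprocessor conditional needed */
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
			   hisi_qm_sriov_configure : NULL,
};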
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 0beca257c20b..568acd0aee3f 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -161,9 +161,6 @@ static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
struct mem_block *block;
u32 block_index, offset;
- if (!pool || !hw_sgl_dma || index >= pool->count)
- return ERR_PTR(-EINVAL);
-
block = pool->mem_block;
block_index = index / pool->sgl_num_per_block;
offset = index % pool->sgl_num_per_block;
@@ -230,7 +227,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
struct scatterlist *sg;
int sg_n;
- if (!dev || !sgl || !pool || !hw_sgl_dma)
+ if (!dev || !sgl || !pool || !hw_sgl_dma || index >= pool->count)
return ERR_PTR(-EINVAL);
sg_n = sg_nents(sgl);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c065fd867161..c94a7b20d07e 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -887,36 +887,34 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- struct dentry *dev_d;
int ret;
- dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);
-
- qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
- qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
- qm->debug.debug_root = dev_d;
ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs));
if (ret) {
dev_warn(dev, "Failed to init ZIP diff regs!\n");
- goto debugfs_remove;
+ return ret;
}
+ qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
+ qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hzip_debugfs_root);
+
hisi_qm_debug_init(qm);
if (qm->fun_type == QM_HW_PF) {
ret = hisi_zip_ctrl_debug_init(qm);
if (ret)
- goto failed_to_create;
+ goto debugfs_remove;
}
hisi_zip_dfx_debug_init(qm);
return 0;
-failed_to_create:
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
debugfs_remove:
- debugfs_remove_recursive(hzip_debugfs_root);
+ debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
return ret;
}
@@ -940,10 +938,10 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
- hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
-
debugfs_remove_recursive(qm->debug.debug_root);
+ hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
+
if (qm->fun_type == QM_HW_PF) {
hisi_zip_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h
index 2524091a5f70..56985e395263 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto.h
@@ -49,10 +49,10 @@ struct iaa_wq {
struct iaa_device *iaa_device;
- u64 comp_calls;
- u64 comp_bytes;
- u64 decomp_calls;
- u64 decomp_bytes;
+ atomic64_t comp_calls;
+ atomic64_t comp_bytes;
+ atomic64_t decomp_calls;
+ atomic64_t decomp_bytes;
};
struct iaa_device_compression_mode {
@@ -73,10 +73,10 @@ struct iaa_device {
int n_wq;
struct list_head wqs;
- u64 comp_calls;
- u64 comp_bytes;
- u64 decomp_calls;
- u64 decomp_bytes;
+ atomic64_t comp_calls;
+ atomic64_t comp_bytes;
+ atomic64_t decomp_calls;
+ atomic64_t decomp_bytes;
};
struct wq_table_entry {
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index b2191ade9011..e810d286ee8c 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -347,18 +347,16 @@ int add_iaa_compression_mode(const char *name,
goto free;
if (ll_table) {
- mode->ll_table = kzalloc(ll_table_size, GFP_KERNEL);
+ mode->ll_table = kmemdup(ll_table, ll_table_size, GFP_KERNEL);
if (!mode->ll_table)
goto free;
- memcpy(mode->ll_table, ll_table, ll_table_size);
mode->ll_table_size = ll_table_size;
}
if (d_table) {
- mode->d_table = kzalloc(d_table_size, GFP_KERNEL);
+ mode->d_table = kmemdup(d_table, d_table_size, GFP_KERNEL);
if (!mode->d_table)
goto free;
- memcpy(mode->d_table, d_table, d_table_size);
mode->d_table_size = d_table_size;
}
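kmemdup() is the single-call replacement for the kzalloc()+memcpy() pair, and it also drops the pointless zeroing of memory that is immediately overwritten. Side by side:

#include <linux/slab.h>
#include <linux/string.h>

/* before: zero the buffer, then overwrite every byte anyway */
static void *dup_table_old(const void *src, size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (p)
		memcpy(p, src, size);
	return p;
}

/* after: allocate and copy in one step */
static void *dup_table_new(const void *src, size_t size)
{
	return kmemdup(src, size, GFP_KERNEL);
}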
@@ -922,7 +920,7 @@ static void rebalance_wq_table(void)
for_each_node_with_cpus(node) {
node_cpus = cpumask_of_node(node);
- for (cpu = 0; cpu < nr_cpus_per_node; cpu++) {
+ for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) {
int node_cpu = cpumask_nth(cpu, node_cpus);
if (WARN_ON(node_cpu >= nr_cpu_ids)) {
@@ -1079,8 +1077,8 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
update_total_comp_bytes_out(ctx->req->dlen);
update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen);
} else {
- update_total_decomp_bytes_in(ctx->req->dlen);
- update_wq_decomp_bytes(iaa_wq->wq, ctx->req->dlen);
+ update_total_decomp_bytes_in(ctx->req->slen);
+ update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen);
}
if (ctx->compress && compression_ctx->verify_compress) {
@@ -1498,7 +1496,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
- u64 start_time_ns;
int order = -1;
compression_ctx = crypto_tfm_ctx(tfm);
@@ -1572,10 +1569,8 @@ static int iaa_comp_acompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
- start_time_ns = iaa_get_ts();
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
&req->dlen, &compression_crc, disable_async);
- update_max_comp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -1622,7 +1617,6 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
struct iaa_wq *iaa_wq;
struct device *dev;
struct idxd_wq *wq;
- u64 start_time_ns;
int order = -1;
cpu = get_cpu();
@@ -1679,10 +1673,8 @@ alloc_dest:
dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
- start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, true);
- update_max_decomp_delay_ns(start_time_ns);
if (ret == -EOVERFLOW) {
dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
req->dlen *= 2;
@@ -1713,7 +1705,6 @@ static int iaa_comp_adecompress(struct acomp_req *req)
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
struct device *dev;
- u64 start_time_ns;
struct idxd_wq *wq;
if (!iaa_crypto_enabled) {
@@ -1773,10 +1764,8 @@ static int iaa_comp_adecompress(struct acomp_req *req)
" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
req->dst, req->dlen, sg_dma_len(req->dst));
- start_time_ns = iaa_get_ts();
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
dst_addr, &req->dlen, false);
- update_max_decomp_delay_ns(start_time_ns);
if (ret == -EINPROGRESS)
return ret;
@@ -2014,7 +2003,7 @@ static int __init iaa_crypto_init_module(void)
int ret = 0;
int node;
- nr_cpus = num_online_cpus();
+ nr_cpus = num_possible_cpus();
for_each_node_with_cpus(node)
nr_nodes++;
if (!nr_nodes) {
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
index c9f83af4b307..f5cc3d29ca19 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
@@ -17,141 +17,117 @@
#include "iaa_crypto.h"
#include "iaa_crypto_stats.h"
-static u64 total_comp_calls;
-static u64 total_decomp_calls;
-static u64 total_sw_decomp_calls;
-static u64 max_comp_delay_ns;
-static u64 max_decomp_delay_ns;
-static u64 total_comp_bytes_out;
-static u64 total_decomp_bytes_in;
-static u64 total_completion_einval_errors;
-static u64 total_completion_timeout_errors;
-static u64 total_completion_comp_buf_overflow_errors;
+static atomic64_t total_comp_calls;
+static atomic64_t total_decomp_calls;
+static atomic64_t total_sw_decomp_calls;
+static atomic64_t total_comp_bytes_out;
+static atomic64_t total_decomp_bytes_in;
+static atomic64_t total_completion_einval_errors;
+static atomic64_t total_completion_timeout_errors;
+static atomic64_t total_completion_comp_buf_overflow_errors;
static struct dentry *iaa_crypto_debugfs_root;
void update_total_comp_calls(void)
{
- total_comp_calls++;
+ atomic64_inc(&total_comp_calls);
}
void update_total_comp_bytes_out(int n)
{
- total_comp_bytes_out += n;
+ atomic64_add(n, &total_comp_bytes_out);
}
void update_total_decomp_calls(void)
{
- total_decomp_calls++;
+ atomic64_inc(&total_decomp_calls);
}
void update_total_sw_decomp_calls(void)
{
- total_sw_decomp_calls++;
+ atomic64_inc(&total_sw_decomp_calls);
}
void update_total_decomp_bytes_in(int n)
{
- total_decomp_bytes_in += n;
+ atomic64_add(n, &total_decomp_bytes_in);
}
void update_completion_einval_errs(void)
{
- total_completion_einval_errors++;
+ atomic64_inc(&total_completion_einval_errors);
}
void update_completion_timeout_errs(void)
{
- total_completion_timeout_errors++;
+ atomic64_inc(&total_completion_timeout_errors);
}
void update_completion_comp_buf_overflow_errs(void)
{
- total_completion_comp_buf_overflow_errors++;
-}
-
-void update_max_comp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_comp_delay_ns)
- max_comp_delay_ns = time_diff;
-}
-
-void update_max_decomp_delay_ns(u64 start_time_ns)
-{
- u64 time_diff;
-
- time_diff = ktime_get_ns() - start_time_ns;
-
- if (time_diff > max_decomp_delay_ns)
- max_decomp_delay_ns = time_diff;
+ atomic64_inc(&total_completion_comp_buf_overflow_errors);
}
void update_wq_comp_calls(struct idxd_wq *idxd_wq)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->comp_calls++;
- wq->iaa_device->comp_calls++;
+ atomic64_inc(&wq->comp_calls);
+ atomic64_inc(&wq->iaa_device->comp_calls);
}
void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->comp_bytes += n;
- wq->iaa_device->comp_bytes += n;
+ atomic64_add(n, &wq->comp_bytes);
+ atomic64_add(n, &wq->iaa_device->comp_bytes);
}
void update_wq_decomp_calls(struct idxd_wq *idxd_wq)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->decomp_calls++;
- wq->iaa_device->decomp_calls++;
+ atomic64_inc(&wq->decomp_calls);
+ atomic64_inc(&wq->iaa_device->decomp_calls);
}
void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n)
{
struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
- wq->decomp_bytes += n;
- wq->iaa_device->decomp_bytes += n;
+ atomic64_add(n, &wq->decomp_bytes);
+ atomic64_add(n, &wq->iaa_device->decomp_bytes);
}
static void reset_iaa_crypto_stats(void)
{
- total_comp_calls = 0;
- total_decomp_calls = 0;
- total_sw_decomp_calls = 0;
- max_comp_delay_ns = 0;
- max_decomp_delay_ns = 0;
- total_comp_bytes_out = 0;
- total_decomp_bytes_in = 0;
- total_completion_einval_errors = 0;
- total_completion_timeout_errors = 0;
- total_completion_comp_buf_overflow_errors = 0;
+ atomic64_set(&total_comp_calls, 0);
+ atomic64_set(&total_decomp_calls, 0);
+ atomic64_set(&total_sw_decomp_calls, 0);
+ atomic64_set(&total_comp_bytes_out, 0);
+ atomic64_set(&total_decomp_bytes_in, 0);
+ atomic64_set(&total_completion_einval_errors, 0);
+ atomic64_set(&total_completion_timeout_errors, 0);
+ atomic64_set(&total_completion_comp_buf_overflow_errors, 0);
}
static void reset_wq_stats(struct iaa_wq *wq)
{
- wq->comp_calls = 0;
- wq->comp_bytes = 0;
- wq->decomp_calls = 0;
- wq->decomp_bytes = 0;
+ atomic64_set(&wq->comp_calls, 0);
+ atomic64_set(&wq->comp_bytes, 0);
+ atomic64_set(&wq->decomp_calls, 0);
+ atomic64_set(&wq->decomp_bytes, 0);
}
static void reset_device_stats(struct iaa_device *iaa_device)
{
struct iaa_wq *iaa_wq;
- iaa_device->comp_calls = 0;
- iaa_device->comp_bytes = 0;
- iaa_device->decomp_calls = 0;
- iaa_device->decomp_bytes = 0;
+ atomic64_set(&iaa_device->comp_calls, 0);
+ atomic64_set(&iaa_device->comp_bytes, 0);
+ atomic64_set(&iaa_device->decomp_calls, 0);
+ atomic64_set(&iaa_device->decomp_bytes, 0);
list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
reset_wq_stats(iaa_wq);
@@ -160,10 +136,14 @@ static void reset_device_stats(struct iaa_device *iaa_device)
static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq)
{
seq_printf(m, " name: %s\n", iaa_wq->wq->name);
- seq_printf(m, " comp_calls: %llu\n", iaa_wq->comp_calls);
- seq_printf(m, " comp_bytes: %llu\n", iaa_wq->comp_bytes);
- seq_printf(m, " decomp_calls: %llu\n", iaa_wq->decomp_calls);
- seq_printf(m, " decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes);
+ seq_printf(m, " comp_calls: %llu\n",
+ atomic64_read(&iaa_wq->comp_calls));
+ seq_printf(m, " comp_bytes: %llu\n",
+ atomic64_read(&iaa_wq->comp_bytes));
+ seq_printf(m, " decomp_calls: %llu\n",
+ atomic64_read(&iaa_wq->decomp_calls));
+ seq_printf(m, " decomp_bytes: %llu\n\n",
+ atomic64_read(&iaa_wq->decomp_bytes));
}
static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
@@ -173,30 +153,41 @@ static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
seq_puts(m, "iaa device:\n");
seq_printf(m, " id: %d\n", iaa_device->idxd->id);
seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq);
- seq_printf(m, " comp_calls: %llu\n", iaa_device->comp_calls);
- seq_printf(m, " comp_bytes: %llu\n", iaa_device->comp_bytes);
- seq_printf(m, " decomp_calls: %llu\n", iaa_device->decomp_calls);
- seq_printf(m, " decomp_bytes: %llu\n", iaa_device->decomp_bytes);
+ seq_printf(m, " comp_calls: %llu\n",
+ atomic64_read(&iaa_device->comp_calls));
+ seq_printf(m, " comp_bytes: %llu\n",
+ atomic64_read(&iaa_device->comp_bytes));
+ seq_printf(m, " decomp_calls: %llu\n",
+ atomic64_read(&iaa_device->decomp_calls));
+ seq_printf(m, " decomp_bytes: %llu\n",
+ atomic64_read(&iaa_device->decomp_bytes));
seq_puts(m, " wqs:\n");
list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
wq_show(m, iaa_wq);
}
-static void global_stats_show(struct seq_file *m)
+static int global_stats_show(struct seq_file *m, void *v)
{
seq_puts(m, "global stats:\n");
- seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls);
- seq_printf(m, " total_decomp_calls: %llu\n", total_decomp_calls);
- seq_printf(m, " total_sw_decomp_calls: %llu\n", total_sw_decomp_calls);
- seq_printf(m, " total_comp_bytes_out: %llu\n", total_comp_bytes_out);
- seq_printf(m, " total_decomp_bytes_in: %llu\n", total_decomp_bytes_in);
+ seq_printf(m, " total_comp_calls: %llu\n",
+ atomic64_read(&total_comp_calls));
+ seq_printf(m, " total_decomp_calls: %llu\n",
+ atomic64_read(&total_decomp_calls));
+ seq_printf(m, " total_sw_decomp_calls: %llu\n",
+ atomic64_read(&total_sw_decomp_calls));
+ seq_printf(m, " total_comp_bytes_out: %llu\n",
+ atomic64_read(&total_comp_bytes_out));
+ seq_printf(m, " total_decomp_bytes_in: %llu\n",
+ atomic64_read(&total_decomp_bytes_in));
seq_printf(m, " total_completion_einval_errors: %llu\n",
- total_completion_einval_errors);
+ atomic64_read(&total_completion_einval_errors));
seq_printf(m, " total_completion_timeout_errors: %llu\n",
- total_completion_timeout_errors);
+ atomic64_read(&total_completion_timeout_errors));
seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n",
- total_completion_comp_buf_overflow_errors);
+ atomic64_read(&total_completion_comp_buf_overflow_errors));
+
+ return 0;
}
static int wq_stats_show(struct seq_file *m, void *v)
@@ -205,8 +196,6 @@ static int wq_stats_show(struct seq_file *m, void *v)
mutex_lock(&iaa_devices_lock);
- global_stats_show(m);
-
list_for_each_entry(iaa_device, &iaa_devices, list)
device_stats_show(m, iaa_device);
@@ -243,6 +232,18 @@ static const struct file_operations wq_stats_fops = {
.release = single_release,
};
+static int global_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, global_stats_show, file);
+}
+
+static const struct file_operations global_stats_fops = {
+ .open = global_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n");
int __init iaa_crypto_debugfs_init(void)
@@ -252,20 +253,8 @@ int __init iaa_crypto_debugfs_init(void)
iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);
- debugfs_create_u64("max_comp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_comp_delay_ns);
- debugfs_create_u64("max_decomp_delay_ns", 0644,
- iaa_crypto_debugfs_root, &max_decomp_delay_ns);
- debugfs_create_u64("total_comp_calls", 0644,
- iaa_crypto_debugfs_root, &total_comp_calls);
- debugfs_create_u64("total_decomp_calls", 0644,
- iaa_crypto_debugfs_root, &total_decomp_calls);
- debugfs_create_u64("total_sw_decomp_calls", 0644,
- iaa_crypto_debugfs_root, &total_sw_decomp_calls);
- debugfs_create_u64("total_comp_bytes_out", 0644,
- iaa_crypto_debugfs_root, &total_comp_bytes_out);
- debugfs_create_u64("total_decomp_bytes_in", 0644,
- iaa_crypto_debugfs_root, &total_decomp_bytes_in);
+ debugfs_create_file("global_stats", 0644, iaa_crypto_debugfs_root, NULL,
+ &global_stats_fops);
debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL,
&wq_stats_fops);
debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL,
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
index c916ca83f070..3787a5f507eb 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_stats.h
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
@@ -13,8 +13,6 @@ void update_total_comp_bytes_out(int n);
void update_total_decomp_calls(void);
void update_total_sw_decomp_calls(void);
void update_total_decomp_bytes_in(int n);
-void update_max_comp_delay_ns(u64 start_time_ns);
-void update_max_decomp_delay_ns(u64 start_time_ns);
void update_completion_einval_errs(void);
void update_completion_timeout_errs(void);
void update_completion_comp_buf_overflow_errs(void);
@@ -24,8 +22,6 @@ void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n);
void update_wq_decomp_calls(struct idxd_wq *idxd_wq);
void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n);
-static inline u64 iaa_get_ts(void) { return ktime_get_ns(); }
-
#else
static inline int iaa_crypto_debugfs_init(void) { return 0; }
static inline void iaa_crypto_debugfs_cleanup(void) {}
@@ -35,8 +31,6 @@ static inline void update_total_comp_bytes_out(int n) {}
static inline void update_total_decomp_calls(void) {}
static inline void update_total_sw_decomp_calls(void) {}
static inline void update_total_decomp_bytes_in(int n) {}
-static inline void update_max_comp_delay_ns(u64 start_time_ns) {}
-static inline void update_max_decomp_delay_ns(u64 start_time_ns) {}
static inline void update_completion_einval_errs(void) {}
static inline void update_completion_timeout_errs(void) {}
static inline void update_completion_comp_buf_overflow_errs(void) {}
@@ -46,8 +40,6 @@ static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {}
static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {}
static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {}
-static inline u64 iaa_get_ts(void) { return 0; }
-
#endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS
#endif
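The motivation for the atomic64_t conversion above: these counters are bumped from concurrent completion paths, and a plain u64 load/add/store can lose increments (and tears on 32-bit). atomic64_t turns each update into an indivisible read-modify-write. The resulting pattern:

#include <linux/atomic.h>

static atomic64_t total_calls;
static atomic64_t total_bytes;

static void on_completion(int nbytes)
{
	atomic64_inc(&total_calls);		/* safe from any context */
	atomic64_add(nbytes, &total_bytes);
}

static s64 snapshot(void)
{
	return atomic64_read(&total_calls);	/* untorn 64-bit read */
}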
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 1102c47f8293..78f0ea49254d 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -10,12 +10,14 @@
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_ras.h>
#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
+#include <adf_gen4_vf_mig.h>
#include "adf_420xx_hw_data.h"
#include "icp_qat_hw.h"
@@ -296,7 +298,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
if (adf_gen4_init_thd2arb_map(accel_dev))
dev_warn(&GET_DEV(accel_dev),
- "Generate of the thread to arbiter map failed");
+ "Failed to generate thread to arbiter mapping");
return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}
@@ -487,6 +489,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
adf_gen4_init_tl_data(&hw_data->tl_data);
+ adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
adf_init_rl_data(&hw_data->rl_data);
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 927506cf271d..9fd7ec53b9f3 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -10,12 +10,14 @@
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
+#include <adf_gen4_vf_mig.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -208,7 +210,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
if (adf_gen4_init_thd2arb_map(accel_dev))
dev_warn(&GET_DEV(accel_dev),
- "Generate of the thread to arbiter map failed");
+ "Failed to generate thread to arbiter mapping");
return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}
@@ -454,6 +456,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->bank_state_save = adf_gen4_bank_state_save;
+ hw_data->bank_state_restore = adf_gen4_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
@@ -469,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
adf_gen4_init_tl_data(&hw_data->tl_data);
+ adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
adf_init_rl_data(&hw_data->rl_data);
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 9762f2bf7727..d26564cebdec 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -197,7 +197,9 @@ module_pci_driver(adf_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_4XXX_FW);
+MODULE_FIRMWARE(ADF_402XX_FW);
MODULE_FIRMWARE(ADF_4XXX_MMP);
+MODULE_FIRMWARE(ADF_402XX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index a882e0ea2279..201f9412c582 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -6,6 +6,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c3xxx_hw_data.h"
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 84d9486e04de..a512ca4efd3f 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 48cf3eb7c734..6b5b0cf9c7c7 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -6,6 +6,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 751d7aa57fc7..4aaaaf921734 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 5915cde8a7aa..6f9266edc9f1 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -14,16 +14,20 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
adf_sysfs.o \
adf_sysfs_ras_counters.o \
+ adf_gen2_hw_csr_data.o \
adf_gen2_hw_data.o \
adf_gen2_config.o \
adf_gen4_config.o \
+ adf_gen4_hw_csr_data.o \
adf_gen4_hw_data.o \
+ adf_gen4_vf_mig.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
adf_gen4_dc.o \
adf_gen4_ras.o \
adf_gen4_timer.o \
adf_clock.o \
+ adf_mstate_mgr.o \
qat_crypto.o \
qat_compression.o \
qat_comp_algs.o \
@@ -52,6 +56,6 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
- adf_gen2_pfvf.o adf_gen4_pfvf.o
+ adf_gen2_pfvf.o adf_gen4_pfvf.o qat_mig_dev.o
intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 08658c3a01e9..7830ecb1a1f1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
+#include <linux/qat/qat_mig_dev.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
@@ -140,6 +141,40 @@ struct admin_info {
u32 mailbox_offset;
};
+struct ring_config {
+ u64 base;
+ u32 config;
+ u32 head;
+ u32 tail;
+ u32 reserved0;
+};
+
+struct bank_state {
+ u32 ringstat0;
+ u32 ringstat1;
+ u32 ringuostat;
+ u32 ringestat;
+ u32 ringnestat;
+ u32 ringnfstat;
+ u32 ringfstat;
+ u32 ringcstat0;
+ u32 ringcstat1;
+ u32 ringcstat2;
+ u32 ringcstat3;
+ u32 iaintflagen;
+ u32 iaintflagreg;
+ u32 iaintflagsrcsel0;
+ u32 iaintflagsrcsel1;
+ u32 iaintcolen;
+ u32 iaintcolctl;
+ u32 iaintflagandcolen;
+ u32 ringexpstat;
+ u32 ringexpintenable;
+ u32 ringsrvarben;
+ u32 reserved0;
+ struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
+};
+
struct adf_hw_csr_ops {
u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
@@ -150,22 +185,49 @@ struct adf_hw_csr_ops {
u32 ring);
void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
u32 ring, u32 value);
+ u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank);
u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+ u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring);
void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
u32 ring, u32 value);
+ dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
+ u32 ring);
void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
u32 ring, dma_addr_t addr);
+ u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank,
+ u32 value);
+ u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
+ void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr,
+ u32 bank, u32 value);
+ u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr,
+ u32 bank);
void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
u32 bank, u32 value);
+ u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*get_int_col_ctl_enable_mask)(void);
};
struct adf_cfg_device_data;
@@ -197,6 +259,20 @@ struct adf_dc_ops {
void (*build_deflate_ctx)(void *ctx);
};
+struct qat_migdev_ops {
+ int (*init)(struct qat_mig_dev *mdev);
+ void (*cleanup)(struct qat_mig_dev *mdev);
+ void (*reset)(struct qat_mig_dev *mdev);
+ int (*open)(struct qat_mig_dev *mdev);
+ void (*close)(struct qat_mig_dev *mdev);
+ int (*suspend)(struct qat_mig_dev *mdev);
+ int (*resume)(struct qat_mig_dev *mdev);
+ int (*save_state)(struct qat_mig_dev *mdev);
+ int (*save_setup)(struct qat_mig_dev *mdev);
+ int (*load_state)(struct qat_mig_dev *mdev);
+ int (*load_setup)(struct qat_mig_dev *mdev, int size);
+};
+
struct adf_dev_err_mask {
u32 cppagentcmdpar_mask;
u32 parerr_ath_cph_mask;
@@ -244,6 +320,10 @@ struct adf_hw_device_data {
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
+ int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state);
+ int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
+ u32 bank_number, struct bank_state *state);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -260,6 +340,7 @@ struct adf_hw_device_data {
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
struct adf_tl_hw_data tl_data;
+ struct qat_migdev_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -316,6 +397,7 @@ struct adf_hw_device_data {
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
@@ -330,11 +412,17 @@ struct adf_fw_loader_data {
struct adf_accel_vf_info {
struct adf_accel_dev *accel_dev;
struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+ struct mutex pfvf_mig_lock; /* protects PFVF state for migration */
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
bool init;
bool restarting;
u8 vf_compat_ver;
+ /*
+ * Private area used for device migration.
+	 * Memory allocation and freeing are managed by the migration driver.
+ */
+ void *mig_priv;
};
struct adf_dc_data {
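
The expanded adf_hw_csr_ops table above is consumed through the GET_CSR_OPS() accessor rather than called directly. As a minimal sketch (the helper below is illustrative, not part of this patch), the new read accessors can be combined to query ring state; E_STAT is a per-bank bitmask with one "empty" bit per ring:

	/* Illustrative only: query ring emptiness through the ops table */
	static bool ring_is_empty(struct adf_accel_dev *accel_dev,
				  void __iomem *csr, u32 bank, u32 ring)
	{
		struct adf_hw_csr_ops *ops = GET_CSR_OPS(accel_dev);

		/* read_csr_e_stat() returns the bank's "ring empty" bitmask */
		return ops->read_csr_e_stat(csr, bank) & BIT(ring);
	}
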
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 57328249c89e..3bec9e20bad0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -248,6 +248,16 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
return pmisc->virt_addr;
}
+static inline void __iomem *adf_get_etr_base(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *etr;
+
+ etr = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)];
+
+ return etr->virt_addr;
+}
+
static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
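
adf_get_etr_base() mirrors the existing adf_get_pmisc_base() and adf_get_aram_base() helpers so that callers no longer open-code the ETR BAR lookup. Roughly, for a caller such as adf_gen4_ring_pair_reset() (converted in a later hunk of this patch):

	/* Before: resolve the ETR BAR by hand */
	u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
	void __iomem *csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;

	/* After: one shared helper, same result */
	void __iomem *csr = adf_get_etr_base(accel_dev);
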
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c
new file mode 100644
index 000000000000..650c9edd8a66
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/types.h>
+#include "adf_gen2_hw_csr_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+ return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+ u32 ring, u32 value)
+{
+ WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ dma_addr_t addr)
+{
+ WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+ csr_ops->read_csr_ring_head = read_csr_ring_head;
+ csr_ops->write_csr_ring_head = write_csr_ring_head;
+ csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+ csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+ csr_ops->read_csr_e_stat = read_csr_e_stat;
+ csr_ops->write_csr_ring_config = write_csr_ring_config;
+ csr_ops->write_csr_ring_base = write_csr_ring_base;
+ csr_ops->write_csr_int_flag = write_csr_int_flag;
+ csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+ csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+ csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+ csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+ csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
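
This new file is a verbatim move of the gen2 CSR accessors out of adf_gen2_hw_data.c (removed in a later hunk), so existing gen2 device drivers keep a single call to populate the table:

	/* e.g. from a gen2 driver's hw_data init function */
	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
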
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h
new file mode 100644
index 000000000000..55058b0f9e52
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef ADF_GEN2_HW_CSR_DATA_H_
+#define ADF_GEN2_HW_CSR_DATA_H_
+
+#include <linux/bitops.h>
+#include "adf_accel_devices.h"
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_FLAG 0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ u32 l_base = 0, u_base = 0; \
+ l_base = (u32)((value) & 0xFFFFFFFF); \
+ u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
+} while (0)
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_FLAG, value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * (index)), value)
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+
+#endif
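
Each accessor above computes its MMIO offset as the bank stride plus the register offset plus the ring index scaled to 4-byte registers. A worked example (bank 2, ring 5, values chosen purely for illustration):

	/*
	 * READ_CSR_RING_HEAD(csr_base, 2, 5) reads from offset:
	 *   ADF_RING_BUNDLE_SIZE * 2    = 0x2000
	 * + ADF_RING_CSR_RING_HEAD      = 0x00C0
	 * + (5 << 2)                    = 0x0014
	 *                               = 0x20D4
	 */
	u32 head = READ_CSR_RING_HEAD(csr_base, 2, 5);
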
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index d1884547b5a1..1f64bf49b221 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -111,103 +111,6 @@ void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
- return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
- return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
- u32 ring, u32 value)
-{
- WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
- dma_addr_t addr)
-{
- WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
- WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
- WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
- csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
- csr_ops->read_csr_ring_head = read_csr_ring_head;
- csr_ops->write_csr_ring_head = write_csr_ring_head;
- csr_ops->read_csr_ring_tail = read_csr_ring_tail;
- csr_ops->write_csr_ring_tail = write_csr_ring_tail;
- csr_ops->read_csr_e_stat = read_csr_e_stat;
- csr_ops->write_csr_ring_config = write_csr_ring_config;
- csr_ops->write_csr_ring_base = write_csr_ring_base;
- csr_ops->write_csr_int_flag = write_csr_int_flag;
- csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
- csr_ops->write_csr_int_col_en = write_csr_int_col_en;
- csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
- csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
- csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
-
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index 6bd341061de4..708e9186127b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -6,78 +6,9 @@
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
-#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
-#define ADF_RING_CSR_RING_CONFIG 0x000
-#define ADF_RING_CSR_RING_LBASE 0x040
-#define ADF_RING_CSR_RING_UBASE 0x080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_FLAG 0x170
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_SRCSEL_2 0x178
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_BUNDLE_SIZE 0x1000
#define ADF_GEN2_RX_RINGS_OFFSET 8
#define ADF_GEN2_TX_RINGS_MASK 0xFF
-#define BUILD_RING_BASE_ADDR(addr, size) \
- (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
- u32 l_base = 0, u_base = 0; \
- l_base = (u32)((value) & 0xFFFFFFFF); \
- u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_FLAG, value)
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-do { \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
-} while (0)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_COL_EN, value)
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_COL_CTL, \
- ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
- ADF_RING_CSR_INT_FLAG_AND_COL, value)
-
/* AE to function map */
#define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190)
#define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310)
@@ -106,12 +37,6 @@ do { \
#define ADF_ARB_OFFSET 0x30000
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
#define ADF_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
-#define ADF_ARB_REG_SLOT 0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
- ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
- (ADF_ARB_REG_SLOT * (index)), value)
/* Power gating */
#define ADF_POWERGATE_DC BIT(23)
@@ -158,7 +83,6 @@ u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
void adf_gen2_get_arb_info(struct arb_info *arb_info);
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c
new file mode 100644
index 000000000000..6609c248aaba
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/types.h>
+#include "adf_gen4_hw_csr_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+ return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_uo_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_UO_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_ne_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_NE_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_nf_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_NF_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_f_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_F_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_c_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_C_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_EXP_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_EXP_INT_EN(csr_base_addr, bank);
+}
+
+static void write_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+ u32 ring)
+{
+ return READ_CSR_RING_CONFIG(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ u32 value)
+{
+ WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static dma_addr_t read_csr_ring_base(void __iomem *csr_base_addr, u32 bank,
+ u32 ring)
+{
+ return READ_CSR_RING_BASE(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+ dma_addr_t addr)
+{
+ WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static u32 read_csr_int_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_EN(csr_base_addr, bank);
+}
+
+static void write_csr_int_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_flag(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_FLAG(csr_base_addr, bank);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_srcsel_w_val(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_col_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_COL_EN(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_COL_CTL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+ u32 value)
+{
+ WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+static u32 get_int_col_ctl_enable_mask(void)
+{
+ return ADF_RING_CSR_INT_COL_CTL_ENABLE;
+}
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+ csr_ops->read_csr_ring_head = read_csr_ring_head;
+ csr_ops->write_csr_ring_head = write_csr_ring_head;
+ csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+ csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+ csr_ops->read_csr_stat = read_csr_stat;
+ csr_ops->read_csr_uo_stat = read_csr_uo_stat;
+ csr_ops->read_csr_e_stat = read_csr_e_stat;
+ csr_ops->read_csr_ne_stat = read_csr_ne_stat;
+ csr_ops->read_csr_nf_stat = read_csr_nf_stat;
+ csr_ops->read_csr_f_stat = read_csr_f_stat;
+ csr_ops->read_csr_c_stat = read_csr_c_stat;
+ csr_ops->read_csr_exp_stat = read_csr_exp_stat;
+ csr_ops->read_csr_exp_int_en = read_csr_exp_int_en;
+ csr_ops->write_csr_exp_int_en = write_csr_exp_int_en;
+ csr_ops->read_csr_ring_config = read_csr_ring_config;
+ csr_ops->write_csr_ring_config = write_csr_ring_config;
+ csr_ops->read_csr_ring_base = read_csr_ring_base;
+ csr_ops->write_csr_ring_base = write_csr_ring_base;
+ csr_ops->read_csr_int_en = read_csr_int_en;
+ csr_ops->write_csr_int_en = write_csr_int_en;
+ csr_ops->read_csr_int_flag = read_csr_int_flag;
+ csr_ops->write_csr_int_flag = write_csr_int_flag;
+ csr_ops->read_csr_int_srcsel = read_csr_int_srcsel;
+ csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+ csr_ops->write_csr_int_srcsel_w_val = write_csr_int_srcsel_w_val;
+ csr_ops->read_csr_int_col_en = read_csr_int_col_en;
+ csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+ csr_ops->read_csr_int_col_ctl = read_csr_int_col_ctl;
+ csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+ csr_ops->read_csr_int_flag_and_col = read_csr_int_flag_and_col;
+ csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+ csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en;
+ csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+ csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h
new file mode 100644
index 000000000000..6f33e7c87c2c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef ADF_GEN4_HW_CSR_DATA_H_
+#define ADF_GEN4_HW_CSR_DATA_H_
+
+#include <linux/bitops.h>
+#include "adf_accel_devices.h"
+
+#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
+#define ADF_RING_CSR_RING_CONFIG 0x1000
+#define ADF_RING_CSR_RING_LBASE 0x1040
+#define ADF_RING_CSR_RING_UBASE 0x1080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_STAT 0x140
+#define ADF_RING_CSR_UO_STAT 0x148
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_NE_STAT 0x150
+#define ADF_RING_CSR_NF_STAT 0x154
+#define ADF_RING_CSR_F_STAT 0x158
+#define ADF_RING_CSR_C_STAT 0x15C
+#define ADF_RING_CSR_INT_FLAG_EN 0x16C
+#define ADF_RING_CSR_INT_FLAG 0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_EXP_STAT 0x188
+#define ADF_RING_CSR_EXP_INT_EN 0x18C
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_CSR_ADDR_OFFSET 0x100000
+#define ADF_RING_BUNDLE_SIZE 0x2000
+#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
+
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_STAT)
+#define READ_CSR_UO_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_UO_STAT)
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
+#define READ_CSR_NE_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NE_STAT)
+#define READ_CSR_NF_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NF_STAT)
+#define READ_CSR_F_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_F_STAT)
+#define READ_CSR_C_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_C_STAT)
+#define READ_CSR_EXP_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_STAT)
+#define READ_CSR_EXP_INT_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_INT_EN)
+#define WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_EXP_INT_EN, value)
+#define READ_CSR_RING_CONFIG(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2))
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ void __iomem *_csr_base_addr = csr_base_addr; \
+ u32 _bank = bank; \
+ u32 _ring = ring; \
+ dma_addr_t _value = value; \
+ u32 l_base = 0, u_base = 0; \
+ l_base = lower_32_bits(_value); \
+ u_base = upper_32_bits(_value); \
+ ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (_bank) + \
+ ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \
+ ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (_bank) + \
+ ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \
+} while (0)
+
+static inline u64 read_base(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ u32 l_base, u_base;
+
+ /*
+ * Use special IO wrapper for ring base as LBASE and UBASE are
+	 * not physically contiguous
+ */
+ l_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+ ADF_RING_CSR_RING_LBASE + (ring << 2));
+ u_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+ ADF_RING_CSR_RING_UBASE + (ring << 2));
+
+ return (u64)u_base << 32 | (u64)l_base;
+}
+
+#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \
+ read_base((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, (bank), (ring))
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define READ_CSR_INT_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG_EN)
+#define WRITE_CSR_INT_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_EN, (value))
+#define READ_CSR_INT_FLAG(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG, (value))
+#define READ_CSR_INT_SRCSEL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_SRCSEL)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
+#define WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL, (value))
+#define READ_CSR_INT_COL_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_EN)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_EN, (value))
+#define READ_CSR_INT_COL_CTL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_CTL)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, (value))
+
+#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_SRV_ARB_EN)
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+
+#endif
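
Because LBASE and UBASE are separate 32-bit registers, the 64-bit ring base is written as two halves and reassembled on read by read_base(). A round-trip sketch, assuming csr_base, bank, ring and base are already in scope:

	/* stores lower_32_bits(base) to LBASE, upper_32_bits(base) to UBASE */
	WRITE_CSR_RING_BASE(csr_base, bank, ring, base);

	/* read_base() returns (u64)UBASE << 32 | LBASE */
	WARN_ON(READ_CSR_RING_BASE(csr_base, bank, ring) != base);
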
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index d28e1921940a..41a0979e68c1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
+#include <asm/div64.h>
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
@@ -8,103 +9,6 @@
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
- return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
- return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
- return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
- u32 value)
-{
- WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
- dma_addr_t addr)
-{
- WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
- WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
- WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
- u32 value)
-{
- WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
- csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
- csr_ops->read_csr_ring_head = read_csr_ring_head;
- csr_ops->write_csr_ring_head = write_csr_ring_head;
- csr_ops->read_csr_ring_tail = read_csr_ring_tail;
- csr_ops->write_csr_ring_tail = write_csr_ring_tail;
- csr_ops->read_csr_e_stat = read_csr_e_stat;
- csr_ops->write_csr_ring_config = write_csr_ring_config;
- csr_ops->write_csr_ring_base = write_csr_ring_base;
- csr_ops->write_csr_int_flag = write_csr_int_flag;
- csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
- csr_ops->write_csr_int_col_en = write_csr_int_col_en;
- csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
- csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
- csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
-
u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
return ADF_GEN4_ACCELERATORS_MASK;
@@ -321,8 +225,7 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number)
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
- void __iomem *csr;
+ void __iomem *csr = adf_get_etr_base(accel_dev);
int ret;
if (bank_number >= hw_data->num_banks)
@@ -331,7 +234,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
dev_dbg(&GET_DEV(accel_dev),
"ring pair reset for bank:%d\n", bank_number);
- csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
ret = reset_ring_pair(csr, bank_number);
if (ret)
dev_err(&GET_DEV(accel_dev),
@@ -489,3 +391,281 @@ set_mask:
return ring_to_svc_map;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);
+
+/**
+ * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer
+ * @accel_dev: Pointer to the device structure
+ * @bank_idx: Offset to the bank within this device
+ * @timeout_ms: Timeout in milliseconds for the operation
+ *
+ * This function tries to quiesce the coalesced interrupt timer of a bank if
+ * it has been enabled and triggered.
+ *
+ * Return: 0 on success, error code otherwise.
+ *
+ */
+int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
+ u32 bank_idx, int timeout_ms)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_misc = adf_get_pmisc_base(accel_dev);
+ void __iomem *csr_etr = adf_get_etr_base(accel_dev);
+ u32 int_col_ctl, int_col_mask, int_col_en;
+ u32 e_stat, intsrc;
+ u64 wait_us;
+ int ret;
+
+ if (timeout_ms < 0)
+ return -EINVAL;
+
+ int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx);
+ int_col_mask = csr_ops->get_int_col_ctl_enable_mask();
+ if (!(int_col_ctl & int_col_mask))
+ return 0;
+
+ int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx);
+ int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX);
+
+ e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx);
+ if (!(~e_stat & int_col_en))
+ return 0;
+
+ wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC;
+ do_div(wait_us, hw_data->clock_frequency);
+ wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC);
+ dev_dbg(&GET_DEV(accel_dev),
+ "wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n",
+ bank_idx, wait_us, timeout_ms, e_stat, int_col_en);
+
+ ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc,
+ ADF_COALESCED_POLL_DELAY_US, wait_us, true,
+ csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx));
+ if (ret)
+ dev_warn(&GET_DEV(accel_dev),
+ "coalesced timer for bank %d expired (%llu us)\n",
+ bank_idx, wait_us);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer);
+
+static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us)
+{
+ u32 status;
+
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_DRAIN);
+
+ return read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US, timeout_us, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+}
+
+void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
+ u32 bank_number)
+{
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
+ ADF_WQM_CSR_RPRESETSTS_STATUS);
+}
+
+int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
+ u32 bank_number, int timeout_us)
+{
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ int ret;
+
+ dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number);
+
+ ret = drain_bank(csr, bank_number, timeout_us);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n");
+
+ return ret;
+}
+
+static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct bank_state *state, u32 num_rings)
+{
+ u32 i;
+
+ state->ringstat0 = ops->read_csr_stat(base, bank);
+ state->ringuostat = ops->read_csr_uo_stat(base, bank);
+ state->ringestat = ops->read_csr_e_stat(base, bank);
+ state->ringnestat = ops->read_csr_ne_stat(base, bank);
+ state->ringnfstat = ops->read_csr_nf_stat(base, bank);
+ state->ringfstat = ops->read_csr_f_stat(base, bank);
+ state->ringcstat0 = ops->read_csr_c_stat(base, bank);
+ state->iaintflagen = ops->read_csr_int_en(base, bank);
+ state->iaintflagreg = ops->read_csr_int_flag(base, bank);
+ state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
+ state->iaintcolen = ops->read_csr_int_col_en(base, bank);
+ state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
+ state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
+ state->ringexpstat = ops->read_csr_exp_stat(base, bank);
+ state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
+ state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
+
+ for (i = 0; i < num_rings; i++) {
+ state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
+ state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
+ state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
+ state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
+ }
+}
+
+#define CHECK_STAT(op, expect_val, name, args...) \
+({ \
+ u32 __expect_val = (expect_val); \
+ u32 actual_val = op(args); \
+ (__expect_val == actual_val) ? 0 : \
+	(pr_err("QAT: Failed to restore %s register. Expected 0x%x, actual 0x%x\n", \
+ name, __expect_val, actual_val), -EINVAL); \
+})
+
+static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct bank_state *state, u32 num_rings,
+ int tx_rx_gap)
+{
+ u32 val, tmp_val, i;
+ int ret;
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
+
+ for (i = 0; i < num_rings / 2; i++) {
+ int tx = i * (tx_rx_gap + 1);
+ int rx = tx + tx_rx_gap;
+
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
+
+		/*
+		 * The TX ring head needs to be updated again to make sure that
+		 * the HW does not consider the ring as full when it is empty and
+		 * that the correct state flags are set to match the recovered state.
+		 */
+ if (state->ringestat & BIT(tx)) {
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ }
+
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+		/*
+		 * The RX ring tail needs to be updated again to make sure that
+		 * the HW does not consider the ring as empty when it is full and
+		 * that the correct state flags are set to match the recovered state.
+		 */
+ if (state->ringfstat & BIT(rx))
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ }
+
+ ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
+ ops->write_csr_int_en(base, bank, state->iaintflagen);
+ ops->write_csr_int_col_en(base, bank, state->iaintcolen);
+ ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
+ ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
+ ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
+ ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
+
+ /* Check that all ring statuses match the saved state. */
+ ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ tmp_val = ops->read_csr_exp_stat(base, bank);
+ val = state->ringexpstat;
+ if (tmp_val && !val) {
+		pr_err("QAT: Bank was restored with exception: 0x%x\n", tmp_val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
+
+ bank_state_save(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
+
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
+
+ ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Unable to restore state of bank %d\n", bank_number);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
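
Taken together, the drain and bank-state helpers give the migration driver (reached through the vfmig_ops table added earlier in this patch) a per-bank suspend/resume primitive. A hedged sketch of the expected sequence; src_dev, dst_dev and bank are illustrative, and error handling is trimmed:

	struct bank_state state;
	int ret;

	/* source: stop traffic on the bank, then snapshot its CSRs */
	adf_gen4_bank_drain_start(src_dev, bank, ADF_RPRESET_POLL_TIMEOUT_US);
	adf_gen4_bank_drain_finish(src_dev, bank);
	adf_gen4_bank_state_save(src_dev, bank, &state);

	/* destination: replay the snapshot; restore logs any CSR mismatch */
	ret = adf_gen4_bank_state_restore(dst_dev, bank, &state);
	if (ret)
		return ret;
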
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index c6e80df5a85a..8b10926cedba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2020 Intel Corporation */
-#ifndef ADF_GEN4_HW_CSR_DATA_H_
-#define ADF_GEN4_HW_CSR_DATA_H_
+#ifndef ADF_GEN4_HW_DATA_H_
+#define ADF_GEN4_HW_DATA_H_
#include <linux/units.h>
@@ -54,95 +54,6 @@
#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578
#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
-#define ADF_RING_CSR_RING_CONFIG 0x1000
-#define ADF_RING_CSR_RING_LBASE 0x1040
-#define ADF_RING_CSR_RING_UBASE 0x1080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_FLAG 0x170
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_ADDR_OFFSET 0x100000
-#define ADF_RING_BUNDLE_SIZE 0x2000
-
-#define BUILD_RING_BASE_ADDR(addr, size) \
- ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
- ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
- ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
- ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
- void __iomem *_csr_base_addr = csr_base_addr; \
- u32 _bank = bank; \
- u32 _ring = ring; \
- dma_addr_t _value = value; \
- u32 l_base = 0, u_base = 0; \
- l_base = lower_32_bits(_value); \
- u_base = upper_32_bits(_value); \
- ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (_bank) + \
- ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \
- ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (_bank) + \
- ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_FLAG, (value))
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_COL_EN, (value))
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_COL_CTL, \
- ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_FLAG_AND_COL, (value))
-
-/* Arbiter configuration */
-#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_RING_SRV_ARB_EN, (value))
-
/* Default ring mapping */
#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
(ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
@@ -166,10 +77,20 @@ do { \
#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
#define ADF_RPRESET_POLL_DELAY_US 20
#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL_DRAIN BIT(2)
#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3))
#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+/* Ring interrupt */
+#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2)
+#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
+#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
+#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)
+#define ADF_COALESCED_POLL_DELAY_US 1000
+#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12))
+#define ADF_WQM_CSR_RP_IDX_RX 1
+
/* Error source registers */
#define ADF_GEN4_ERRSOU0 (0x41A200)
#define ADF_GEN4_ERRSOU1 (0x41A204)
@@ -197,6 +118,19 @@ do { \
/* Arbiter threads mask with error value */
#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0)
+/* PF2VM communication channel */
+#define ADF_GEN4_PF2VM_OFFSET(i) (0x40B010 + (i) * 0x20)
+#define ADF_GEN4_VM2PF_OFFSET(i) (0x40B014 + (i) * 0x20)
+#define ADF_GEN4_VINTMSKPF2VM_OFFSET(i) (0x40B00C + (i) * 0x20)
+#define ADF_GEN4_VINTSOUPF2VM_OFFSET(i) (0x40B008 + (i) * 0x20)
+#define ADF_GEN4_VINTMSK_OFFSET(i) (0x40B004 + (i) * 0x20)
+#define ADF_GEN4_VINTSOU_OFFSET(i) (0x40B000 + (i) * 0x20)
+
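+/*
+ * Per-VF live migration context kept by the PF: the state manager used
+ * to (de)serialize the device state blob, plus a per-bank flag recording
+ * whether a ring-pair drain has been started for that bank.
+ */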
+struct adf_gen4_vfmig {
+ struct adf_mstate_mgr *mstate_mgr;
+ bool bank_stopped[ADF_GEN4_NUM_BANKS_PER_VF];
+};
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
enum icp_qat_gen4_slice_mask {
@@ -230,11 +164,20 @@ u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self);
enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self);
u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self);
int adf_gen4_init_device(struct adf_accel_dev *accel_dev);
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev);
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev);
u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev);
+int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
+ u32 bank_idx, int timeout_ms);
+int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
+ u32 bank_number, int timeout_us);
+void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
+ u32 bank_number);
+int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state);
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
+ u32 bank_number, struct bank_state *state);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
index 8e8efe93f3ee..21474d402d09 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
@@ -6,12 +6,10 @@
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_pfvf.h"
+#include "adf_gen4_hw_data.h"
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_utils.h"
-#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
-#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
-
/* VF2PF interrupt source registers */
#define ADF_4XXX_VM2PF_SOU 0x41A180
#define ADF_4XXX_VM2PF_MSK 0x41A1C0
@@ -29,12 +27,12 @@ static const struct pfvf_csr_format csr_gen4_fmt = {
static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
{
- return ADF_4XXX_PF2VM_OFFSET(i);
+ return ADF_GEN4_PF2VM_OFFSET(i);
}
static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
{
- return ADF_4XXX_VM2PF_OFFSET(i);
+ return ADF_GEN4_VM2PF_OFFSET(i);
}
static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
index 7fc7a77f6aed..c7ad8cf07863 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
@@ -149,5 +149,6 @@ void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
tl_data->sl_exec_counters = sl_exec_counters;
tl_data->rp_counters = rp_counters;
tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+ tl_data->max_sl_cnt = ADF_GEN4_TL_MAX_SLICES_PER_TYPE;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
new file mode 100644
index 000000000000..a62eb5e8dbe6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
@@ -0,0 +1,1010 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_pfvf_utils.h"
+#include "adf_mstate_mgr.h"
+#include "adf_gen4_vf_mig.h"
+
+#define ADF_GEN4_VF_MSTATE_SIZE 4096
+#define ADF_GEN4_PFVF_RSP_TIMEOUT_US 5000
+
+static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev);
+static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len);
+
+static int adf_gen4_vfmig_init_device(struct qat_mig_dev *mdev)
+{
+ u8 *state;
+
+ state = kmalloc(ADF_GEN4_VF_MSTATE_SIZE, GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mdev->state = state;
+ mdev->state_size = ADF_GEN4_VF_MSTATE_SIZE;
+ mdev->setup_size = 0;
+ mdev->remote_setup_size = 0;
+
+ return 0;
+}
+
+static void adf_gen4_vfmig_cleanup_device(struct qat_mig_dev *mdev)
+{
+ kfree(mdev->state);
+ mdev->state = NULL;
+}
+
+static void adf_gen4_vfmig_reset_device(struct qat_mig_dev *mdev)
+{
+ mdev->setup_size = 0;
+ mdev->remote_setup_size = 0;
+}
+
+static int adf_gen4_vfmig_open_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+
+ vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
+
+ vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL);
+ if (!vfmig)
+ return -ENOMEM;
+
+ vfmig->mstate_mgr = adf_mstate_mgr_new(mdev->state, mdev->state_size);
+ if (!vfmig->mstate_mgr) {
+ kfree(vfmig);
+ return -ENOMEM;
+ }
+ vf_info->mig_priv = vfmig;
+ mdev->setup_size = 0;
+ mdev->remote_setup_size = 0;
+
+ return 0;
+}
+
+static void adf_gen4_vfmig_close_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+
+ vf_info = &accel_dev->pf.vf_info[mdev->vf_id];
+ if (vf_info->mig_priv) {
+ vfmig = vf_info->mig_priv;
+ adf_mstate_mgr_destroy(vfmig->mstate_mgr);
+ kfree(vfmig);
+ vf_info->mig_priv = NULL;
+ }
+}
+
+static int adf_gen4_vfmig_suspend_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vf_mig;
+ u32 vf_nr = mdev->vf_id;
+ int ret, i;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vf_mig = vf_info->mig_priv;
+
+ /* Stop all inflight jobs */
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
+
+ ret = adf_gen4_bank_drain_start(accel_dev, pf_bank_nr,
+ ADF_RPRESET_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to drain bank %d for vf_nr %d\n", i,
+ vf_nr);
+ return ret;
+ }
+ vf_mig->bank_stopped[i] = true;
+
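+ /*
+  * Quiesce the interrupt coalescing timer on the drained bank before
+  * its state is saved.
+  */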
+ adf_gen4_bank_quiesce_coal_timer(accel_dev, pf_bank_nr,
+ ADF_COALESCED_POLL_TIMEOUT_US);
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_resume_device(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vf_mig;
+ u32 vf_nr = mdev->vf_id;
+ int i;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vf_mig = vf_info->mig_priv;
+
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf;
+
+ if (vf_mig->bank_stopped[i]) {
+ adf_gen4_bank_drain_finish(accel_dev, pf_bank_nr);
+ vf_mig->bank_stopped[i] = false;
+ }
+ }
+
+ return 0;
+}
+
+struct adf_vf_bank_info {
+ struct adf_accel_dev *accel_dev;
+ u32 vf_nr;
+ u32 bank_nr;
+};
+
+struct mig_user_sla {
+ enum adf_base_services srv;
+ u64 rp_mask;
+ u32 cir;
+ u32 pir;
+};
+
+static int adf_mstate_sla_check(struct adf_mstate_mgr *sub_mgr, u8 *src_buf,
+ u32 src_size, void *opaque)
+{
+ struct adf_mstate_vreginfo _sinfo = { src_buf, src_size };
+ struct adf_mstate_vreginfo *sinfo = &_sinfo, *dinfo = opaque;
+ u32 src_sla_cnt = sinfo->size / sizeof(struct mig_user_sla);
+ u32 dst_sla_cnt = dinfo->size / sizeof(struct mig_user_sla);
+ struct mig_user_sla *src_slas = sinfo->addr;
+ struct mig_user_sla *dst_slas = dinfo->addr;
+ int i, j;
+
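+ /*
+  * Every source SLA must have a destination counterpart with the same
+  * service and ring-pair mask whose CIR/PIR limits are no lower.
+  */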
+ for (i = 0; i < src_sla_cnt; i++) {
+ for (j = 0; j < dst_sla_cnt; j++) {
+ if (src_slas[i].srv != dst_slas[j].srv ||
+ src_slas[i].rp_mask != dst_slas[j].rp_mask)
+ continue;
+
+ if (src_slas[i].cir > dst_slas[j].cir ||
+ src_slas[i].pir > dst_slas[j].pir) {
+ pr_err("QAT: DST VF rate limiting mismatch.\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ if (j == dst_sla_cnt) {
+ pr_err("QAT: SRC VF rate limiting mismatch - SRC srv %d and rp_mask 0x%llx.\n",
+ src_slas[i].srv, src_slas[i].rp_mask);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static inline int adf_mstate_check_cap_size(u32 src_sz, u32 dst_sz, u32 max_sz)
+{
+ if (src_sz > max_sz || dst_sz > max_sz)
+ return -EINVAL;
+ else
+ return 0;
+}
+
+static int adf_mstate_compatver_check(struct adf_mstate_mgr *sub_mgr,
+ u8 *src_buf, u32 src_sz, void *opaque)
+{
+ struct adf_mstate_vreginfo *info = opaque;
+ u8 compat = 0;
+ u8 *pcompat;
+
+ if (src_sz != info->size) {
+ pr_debug("QAT: State mismatch (compat version size), current %u, expected %u\n",
+ src_sz, info->size);
+ return -EINVAL;
+ }
+
+ memcpy(info->addr, src_buf, info->size);
+ pcompat = info->addr;
+ if (*pcompat == 0) {
+ pr_warn("QAT: Unable to determine the version of the VF\n");
+ return 0;
+ }
+
+ compat = adf_vf_compat_checker(*pcompat);
+ if (compat == ADF_PF2VF_VF_INCOMPATIBLE) {
+ pr_debug("QAT: SRC VF driver (ver=%u) is incompatible with DST PF driver (ver=%u)\n",
+ *pcompat, ADF_PFVF_COMPAT_THIS_VERSION);
+ return -EINVAL;
+ }
+
+ if (compat == ADF_PF2VF_VF_COMPAT_UNKNOWN)
+ pr_debug("QAT: SRC VF driver (ver=%u) is newer than DST PF driver (ver=%u)\n",
+ *pcompat, ADF_PFVF_COMPAT_THIS_VERSION);
+
+ return 0;
+}
+
+/*
+ * adf_mstate_capmask_compare() - compare QAT device capability mask
+ * @sinfo: Pointer to source capability info
+ * @dinfo: Pointer to target capability info
+ *
+ * This function compares the capability mask between the source and target VFs.
+ *
+ * Returns: 0 if target capability mask is identical to source capability mask,
+ * 1 if target mask can represent all the capabilities represented by source mask,
+ * -1 if target mask can't represent all the capabilities represented by source
+ * mask.
+ */
+static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo,
+ struct adf_mstate_vreginfo *dinfo)
+{
+ u64 src = 0, dst = 0;
+
+ if (adf_mstate_check_cap_size(sinfo->size, dinfo->size, sizeof(u64))) {
+ pr_debug("QAT: Unexpected capability size %u %u %zu\n",
+ sinfo->size, dinfo->size, sizeof(u64));
+ return -1;
+ }
+
+ memcpy(&src, sinfo->addr, sinfo->size);
+ memcpy(&dst, dinfo->addr, dinfo->size);
+
+ pr_debug("QAT: Comparing capability masks: src %llu, dst %llu\n", src, dst);
+
+ if (src == dst)
+ return 0;
+
+ if ((src | dst) == dst)
+ return 1;
+
+ return -1;
+}
+
+static int adf_mstate_capmask_superset(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo sinfo = { buf, size };
+
+ if (adf_mstate_capmask_compare(&sinfo, opa) >= 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int adf_mstate_capmask_equal(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo sinfo = { buf, size };
+
+ if (adf_mstate_capmask_compare(&sinfo, opa) == 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int adf_mstate_set_vreg(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo *info = opa;
+
+ if (size != info->size) {
+ pr_debug("QAT: Unexpected cap size %u %u\n", size, info->size);
+ return -EINVAL;
+ }
+ memcpy(info->addr, buf, info->size);
+
+ return 0;
+}
+
+static u32 adf_gen4_vfmig_get_slas(struct adf_accel_dev *accel_dev, u32 vf_nr,
+ struct mig_user_sla *pmig_slas)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ struct rl_sla **sla_type_arr = NULL;
+ u64 rp_mask, rp_index;
+ u32 max_num_sla;
+ u32 sla_cnt = 0;
+ int i, j;
+
+ if (!accel_dev->rate_limiting)
+ return 0;
+
+ rp_index = vf_nr * hw_data->num_banks_per_vf;
+ max_num_sla = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_type_arr);
+
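+ /* Collect the leaf SLAs whose ring pairs belong to this VF */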
+ for (i = 0; i < max_num_sla; i++) {
+ if (!sla_type_arr[i])
+ continue;
+
+ rp_mask = 0;
+ for (j = 0; j < sla_type_arr[i]->ring_pairs_cnt; j++)
+ rp_mask |= BIT(sla_type_arr[i]->ring_pairs_ids[j]);
+
+ if (rp_mask & GENMASK_ULL(rp_index + 3, rp_index)) {
+ pmig_slas->rp_mask = rp_mask;
+ pmig_slas->cir = sla_type_arr[i]->cir;
+ pmig_slas->pir = sla_type_arr[i]->pir;
+ pmig_slas->srv = sla_type_arr[i]->srv;
+ pmig_slas++;
+ sla_cnt++;
+ }
+ }
+
+ return sla_cnt;
+}
+
+static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr,
+ u8 *state, u32 size, void *opa)
+{
+ struct adf_vf_bank_info *vf_bank_info = opa;
+ struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 pf_bank_nr;
+ int ret;
+
+ pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
+ ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr,
+ (struct bank_state *)state);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load regs for vf%d bank%d\n",
+ vf_bank_info->vf_nr, vf_bank_info->bank_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev,
+ u32 vf_nr, u32 bank_nr,
+ struct adf_mstate_mgr *mstate_mgr)
+{
+ struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr};
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ char bank_ids[ADF_MSTATE_ID_LEN];
+
+ snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);
+ subsec = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to lookup sec %s for vf%d bank%d\n",
+ ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
+ adf_gen4_vfmig_load_etr_regs,
+ &vf_bank_info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to lookup sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec;
+ int ret, i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL,
+ NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_ETRB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, i,
+ &sub_sects_mgr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct {
+ char *id;
+ u64 ofs;
+ } misc_states[] = {
+ {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL,
+ NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_MISCB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
+ struct adf_mstate_vreginfo info;
+ u32 regv;
+
+ info.addr = &regv;
+ info.size = sizeof(regv);
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr,
+ misc_states[i].id,
+ adf_mstate_set_vreg,
+ &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load sec %s\n", misc_states[i].id);
+ return -EINVAL;
+ }
+ ADF_CSR_WR(csr, misc_states[i].ofs, regv);
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct mig_user_sla dst_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ u32 dst_sla_cnt;
+ struct {
+ char *id;
+ int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
+ struct adf_mstate_vreginfo info;
+ } gen_states[] = {
+ {ADF_MSTATE_IOV_INIT_IDS, adf_mstate_set_vreg,
+ {&vf_info->init, sizeof(vf_info->init)}},
+ {ADF_MSTATE_COMPAT_VER_IDS, adf_mstate_compatver_check,
+ {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
+ {ADF_MSTATE_SLA_IDS, adf_mstate_sla_check, {dst_slas, 0}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
+ if (gen_states[i].info.addr == dst_slas) {
+ dst_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, dst_slas);
+ gen_states[i].info.size = dst_sla_cnt * sizeof(struct mig_user_sla);
+ }
+
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr,
+ gen_states[i].id,
+ gen_states[i].action,
+ &gen_states[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ gen_states[i].id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct {
+ char *id;
+ int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa);
+ struct adf_mstate_vreginfo info;
+ } setups[] = {
+ {ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset,
+ {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
+ {ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal,
+ {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
+ {ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset,
+ {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ ADF_MSTATE_CONFIG_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(setups); i++) {
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, setups[i].id,
+ setups[i].action, &setups[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n",
+ setups[i].id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
+ u32 size, void *opa)
+{
+ struct adf_vf_bank_info *vf_bank_info = opa;
+ struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 pf_bank_nr;
+ int ret;
+
+ pf_bank_nr = vf_bank_info->bank_nr;
+ pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
+
+ ret = hw_data->bank_state_save(accel_dev, pf_bank_nr,
+ (struct bank_state *)state);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save regs for vf%d bank%d\n",
+ vf_bank_info->vf_nr, vf_bank_info->bank_nr);
+ return ret;
+ }
+
+ return sizeof(struct bank_state);
+}
+
+static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev,
+ u32 vf_nr, u32 bank_nr,
+ struct adf_mstate_mgr *mstate_mgr)
+{
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_vf_bank_info vf_bank_info;
+ struct adf_mstate_mgr sub_sects_mgr;
+ char bank_ids[ADF_MSTATE_ID_LEN];
+
+ snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr);
+
+ subsec = adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ vf_bank_info.accel_dev = accel_dev;
+ vf_bank_info.vf_nr = vf_nr;
+ vf_bank_info.bank_nr = bank_nr;
+ l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
+ adf_gen4_vfmig_save_etr_regs,
+ &vf_bank_info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec;
+ int ret, i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_ETRB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ ret = adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, i,
+ &sub_sects_mgr);
+ if (ret)
+ return ret;
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct {
+ char *id;
+ u64 offset;
+ } misc_states[] = {
+ {ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
+ };
+ ktime_t time_exp;
+ int i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_MISCB_IDS);
+ return -EINVAL;
+ }
+
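+ /*
+  * Poll for the pfvf lock rather than block: a VF2PF handler may hold
+  * it, and the save must fail cleanly on timeout.
+  */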
+ time_exp = ktime_add_us(ktime_get(), ADF_GEN4_PFVF_RSP_TIMEOUT_US);
+ while (!mutex_trylock(&vf_info->pfvf_mig_lock)) {
+ if (ktime_after(ktime_get(), time_exp)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to get pfvf mig lock\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(500, 1000);
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
+ struct adf_mstate_vreginfo info;
+ u32 regv;
+
+ info.addr = &regv;
+ info.size = sizeof(regv);
+ regv = ADF_CSR_RD(csr, misc_states[i].offset);
+
+ l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr,
+ misc_states[i].id,
+ &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ misc_states[i].id);
+ mutex_unlock(&vf_info->pfvf_mig_lock);
+ return -EINVAL;
+ }
+ }
+
+ mutex_unlock(&vf_info->pfvf_mig_lock);
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct mig_user_sla src_slas[RL_RP_CNT_PER_LEAF_MAX] = { };
+ u32 src_sla_cnt;
+ struct {
+ char *id;
+ struct adf_mstate_vreginfo info;
+ } gen_states[] = {
+ {ADF_MSTATE_IOV_INIT_IDS,
+ {&vf_info->init, sizeof(vf_info->init)}},
+ {ADF_MSTATE_COMPAT_VER_IDS,
+ {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}},
+ {ADF_MSTATE_SLA_IDS, {src_slas, 0}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
+ if (gen_states[i].info.addr == src_slas) {
+ src_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, src_slas);
+ gen_states[i].info.size = src_sla_cnt * sizeof(struct mig_user_sla);
+ }
+
+ l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr,
+ gen_states[i].id,
+ &gen_states[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ gen_states[i].id);
+ return -EINVAL;
+ }
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_config(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *subsec, *l2_subsec;
+ struct {
+ char *id;
+ struct adf_mstate_vreginfo info;
+ } setups[] = {
+ {ADF_MSTATE_GEN_CAP_IDS,
+ {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
+ {ADF_MSTATE_GEN_SVCMAP_IDS,
+ {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
+ {ADF_MSTATE_GEN_EXTDC_IDS,
+ {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
+ };
+ int i;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ ADF_MSTATE_CONFIG_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(setups); i++) {
+ l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, setups[i].id,
+ &setups[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n",
+ setups[i].id);
+ return -EINVAL;
+ }
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ ret = adf_gen4_vfmig_save_setup(mdev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save setup for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
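+ /* The runtime state is serialized into mdev->state after the setup blob */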
+ adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state + mdev->setup_size,
+ mdev->state_size - mdev->setup_size);
+ if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
+ return -EINVAL;
+
+ ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save generic state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save misc bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to save etr bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ adf_mstate_preamble_update(vfmig->mstate_mgr);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ ret = adf_gen4_vfmig_load_setup(mdev, mdev->state_size);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load setup for vf_nr %d\n",
+ vf_nr);
+ return ret;
+ }
+
+ ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr,
+ mdev->state + mdev->remote_setup_size,
+ mdev->state_size - mdev->remote_setup_size,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr %d\n",
+ vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load generic state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load misc bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load etr bar state for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ if (mdev->setup_size)
+ return 0;
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
+ if (!adf_mstate_preamble_add(vfmig->mstate_mgr))
+ return -EINVAL;
+
+ ret = adf_gen4_vfmig_save_config(accel_dev, mdev->vf_id);
+ if (ret)
+ return ret;
+
+ adf_mstate_preamble_update(vfmig->mstate_mgr);
+ mdev->setup_size = adf_mstate_state_size(vfmig->mstate_mgr);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+ struct adf_accel_vf_info *vf_info;
+ struct adf_gen4_vfmig *vfmig;
+ u32 vf_nr = mdev->vf_id;
+ u32 setup_size;
+ int ret;
+
+ vf_info = &accel_dev->pf.vf_info[vf_nr];
+ vfmig = vf_info->mig_priv;
+
+ if (mdev->remote_setup_size)
+ return 0;
+
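+ /* At least the preamble is needed to learn the setup blob size */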
+ if (len < sizeof(struct adf_mstate_preh))
+ return -EAGAIN;
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size);
+ setup_size = adf_mstate_state_size_from_remote(vfmig->mstate_mgr);
+ if (setup_size > mdev->state_size)
+ return -EINVAL;
+
+ if (len < setup_size)
+ return -EAGAIN;
+
+ ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state,
+ setup_size, NULL, NULL);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Invalid setup for vf_nr %d\n",
+ vf_nr);
+ return ret;
+ }
+
+ mdev->remote_setup_size = setup_size;
+
+ ret = adf_gen4_vfmig_load_config(accel_dev, vf_nr);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to load config for vf_nr %d\n", vf_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops)
+{
+ vfmig_ops->init = adf_gen4_vfmig_init_device;
+ vfmig_ops->cleanup = adf_gen4_vfmig_cleanup_device;
+ vfmig_ops->reset = adf_gen4_vfmig_reset_device;
+ vfmig_ops->open = adf_gen4_vfmig_open_device;
+ vfmig_ops->close = adf_gen4_vfmig_close_device;
+ vfmig_ops->suspend = adf_gen4_vfmig_suspend_device;
+ vfmig_ops->resume = adf_gen4_vfmig_resume_device;
+ vfmig_ops->save_state = adf_gen4_vfmig_save_state;
+ vfmig_ops->load_state = adf_gen4_vfmig_load_state;
+ vfmig_ops->load_setup = adf_gen4_vfmig_load_setup;
+ vfmig_ops->save_setup = adf_gen4_vfmig_save_setup;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h
new file mode 100644
index 000000000000..72216d078ee1
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef ADF_GEN4_VF_MIG_H_
+#define ADF_GEN4_VF_MIG_H_
+
+#include "adf_accel_devices.h"
+
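+/*
+ * Sketch of the expected flow (the exact consumer is not shown in this
+ * patch): create/init a qat_mig_dev, then suspend -> save_state on the
+ * source device and load_setup -> load_state -> resume on the target.
+ */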
+void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
new file mode 100644
index 000000000000..41cc763a74aa
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "adf_mstate_mgr.h"
+
+#define ADF_MSTATE_MAGIC 0xADF5CAEA
+#define ADF_MSTATE_VERSION 0x1
+
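+/*
+ * Serialized layout: a preamble (struct adf_mstate_preh) followed by
+ * n_sects sections.  Each section starts with this fixed header; nested
+ * subsections, if any, live inside its state[] payload.
+ */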
+struct adf_mstate_sect_h {
+ u8 id[ADF_MSTATE_ID_LEN];
+ u32 size;
+ u32 sub_sects;
+ u8 state[];
+};
+
+u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr)
+{
+ return mgr->state - mgr->buf;
+}
+
+static inline u32 adf_mstate_avail_room(struct adf_mstate_mgr *mgr)
+{
+ return mgr->buf + mgr->size - mgr->state;
+}
+
+void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size)
+{
+ mgr->buf = buf;
+ mgr->state = buf;
+ mgr->size = size;
+ mgr->n_sects = 0;
+}
+
+struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size)
+{
+ struct adf_mstate_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+
+ adf_mstate_mgr_init(mgr, buf, size);
+
+ return mgr;
+}
+
+void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr)
+{
+ kfree(mgr);
+}
+
+void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_mgr *p_mgr)
+{
+ adf_mstate_mgr_init(mgr, p_mgr->state,
+ p_mgr->size - adf_mstate_state_size(p_mgr));
+}
+
+void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_sect_h *p_sect)
+{
+ adf_mstate_mgr_init(mgr, p_sect->state, p_sect->size);
+ mgr->n_sects = p_sect->sub_sects;
+}
+
+static void adf_mstate_preamble_init(struct adf_mstate_preh *preamble)
+{
+ preamble->magic = ADF_MSTATE_MAGIC;
+ preamble->version = ADF_MSTATE_VERSION;
+ preamble->preh_len = sizeof(*preamble);
+ preamble->size = 0;
+ preamble->n_sects = 0;
+}
+
+/* Default preamble checker */
+static int adf_mstate_preamble_def_checker(struct adf_mstate_preh *preamble,
+ void *opaque)
+{
+ struct adf_mstate_mgr *mgr = opaque;
+
+ if (preamble->magic != ADF_MSTATE_MAGIC ||
+ preamble->version > ADF_MSTATE_VERSION ||
+ preamble->preh_len > mgr->size) {
+ pr_debug("QAT: LM - Invalid state (magic=%#x, version=%#x, hlen=%u), state_size=%u\n",
+ preamble->magic, preamble->version, preamble->preh_len,
+ mgr->size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
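+/*
+ * Reserve room for the preamble at the head of the buffer; its size and
+ * section count are filled in by adf_mstate_preamble_update() once all
+ * sections have been added.
+ */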
+struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_preh *pre = (struct adf_mstate_preh *)mgr->buf;
+
+ if (adf_mstate_avail_room(mgr) < sizeof(*pre)) {
+ pr_err("QAT: LM - Not enough space for preamble\n");
+ return NULL;
+ }
+
+ adf_mstate_preamble_init(pre);
+ mgr->state += pre->preh_len;
+
+ return pre;
+}
+
+int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_preh *preamble = (struct adf_mstate_preh *)mgr->buf;
+
+ preamble->size = adf_mstate_state_size(mgr) - preamble->preh_len;
+ preamble->n_sects = mgr->n_sects;
+
+ return 0;
+}
+
+static void adf_mstate_dump_sect(struct adf_mstate_sect_h *sect,
+ const char *prefix)
+{
+ pr_debug("QAT: LM - %s QAT state section %s\n", prefix, sect->id);
+ print_hex_dump_debug("h-", DUMP_PREFIX_OFFSET, 16, 2, sect,
+ sizeof(*sect), true);
+ print_hex_dump_debug("s-", DUMP_PREFIX_OFFSET, 16, 2, sect->state,
+ sect->size, true);
+}
+
+static inline void __adf_mstate_sect_update(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_sect_h *sect,
+ u32 size,
+ u32 n_subsects)
+{
+ sect->size += size;
+ sect->sub_sects += n_subsects;
+ mgr->n_sects++;
+ mgr->state += sect->size;
+
+ adf_mstate_dump_sect(sect, "Add");
+}
+
+void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr,
+ struct adf_mstate_mgr *curr_mgr,
+ struct adf_mstate_sect_h *sect)
+{
+ __adf_mstate_sect_update(p_mgr, sect, adf_mstate_state_size(curr_mgr),
+ curr_mgr->n_sects);
+}
+
+static struct adf_mstate_sect_h *adf_mstate_sect_add_header(struct adf_mstate_mgr *mgr,
+ const char *id)
+{
+ struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)(mgr->state);
+
+ if (adf_mstate_avail_room(mgr) < sizeof(*sect)) {
+ pr_debug("QAT: LM - Not enough space for header of QAT state sect %s\n", id);
+ return NULL;
+ }
+
+ strscpy(sect->id, id, sizeof(sect->id));
+ sect->size = 0;
+ sect->sub_sects = 0;
+ mgr->state += sizeof(*sect);
+
+ return sect;
+}
+
+struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr,
+ const char *id,
+ struct adf_mstate_vreginfo *info)
+{
+ struct adf_mstate_sect_h *sect;
+
+ sect = adf_mstate_sect_add_header(mgr, id);
+ if (!sect)
+ return NULL;
+
+ if (adf_mstate_avail_room(mgr) < info->size) {
+ pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n",
+ id, info->size);
+ return NULL;
+ }
+
+ memcpy(sect->state, info->addr, info->size);
+ __adf_mstate_sect_update(mgr, sect, info->size, 0);
+
+ return sect;
+}
+
+struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_populate populate,
+ void *opaque)
+{
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_sect_h *sect;
+ int avail_room, size;
+
+ sect = adf_mstate_sect_add_header(mgr, id);
+ if (!sect)
+ return NULL;
+
+ if (!populate)
+ return sect;
+
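+ /*
+  * The populate callback writes the section body straight into the
+  * state buffer and may add nested subsections through a temporary
+  * sub-section manager.
+  */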
+ avail_room = adf_mstate_avail_room(mgr);
+ adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mgr);
+
+ size = (*populate)(&sub_sects_mgr, sect->state, avail_room, opaque);
+ if (size < 0)
+ return NULL;
+
+ size += adf_mstate_state_size(&sub_sects_mgr);
+ if (avail_room < size) {
+ pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n",
+ id, size);
+ return NULL;
+ }
+ __adf_mstate_sect_update(mgr, sect, size, sub_sects_mgr.n_sects);
+
+ return sect;
+}
+
+static int adf_mstate_sect_validate(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_sect_h *start = (struct adf_mstate_sect_h *)mgr->state;
+ struct adf_mstate_sect_h *sect = start;
+ u64 end;
+ int i;
+
+ end = (uintptr_t)mgr->buf + mgr->size;
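+ /* Ensure every section payload lies within the state buffer */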
+ for (i = 0; i < mgr->n_sects; i++) {
+ uintptr_t s_start = (uintptr_t)sect->state;
+ uintptr_t s_end = s_start + sect->size;
+
+ if (s_end < s_start || s_end > end) {
+ pr_debug("QAT: LM - Corrupted state section (index=%u, size=%u) in state_mgr (size=%u, secs=%u)\n",
+ i, sect->size, mgr->size, mgr->n_sects);
+ return -EINVAL;
+ }
+ sect = (struct adf_mstate_sect_h *)s_end;
+ }
+
+ pr_debug("QAT: LM - Scanned section (last child=%s, size=%lu) in state_mgr (size=%u, secs=%u)\n",
+ start->id, sizeof(struct adf_mstate_sect_h) * (ulong)(sect - start),
+ mgr->size, mgr->n_sects);
+
+ return 0;
+}
+
+u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr)
+{
+ struct adf_mstate_preh *preh = (struct adf_mstate_preh *)mgr->buf;
+
+ return preh->preh_len + preh->size;
+}
+
+int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, u8 *buf, u32 size,
+ adf_mstate_preamble_checker pre_checker,
+ void *opaque)
+{
+ struct adf_mstate_preh *pre;
+ int ret;
+
+ adf_mstate_mgr_init(mgr, buf, size);
+ pre = (struct adf_mstate_preh *)(mgr->buf);
+
+ pr_debug("QAT: LM - Dump state preamble\n");
+ print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 2, pre, pre->preh_len, 0);
+
+ if (pre_checker)
+ ret = (*pre_checker)(pre, opaque);
+ else
+ ret = adf_mstate_preamble_def_checker(pre, mgr);
+ if (ret)
+ return ret;
+
+ mgr->state = mgr->buf + pre->preh_len;
+ mgr->n_sects = pre->n_sects;
+
+ return adf_mstate_sect_validate(mgr);
+}
+
+struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_action action,
+ void *opaque)
+{
+ struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)mgr->state;
+ struct adf_mstate_mgr sub_sects_mgr;
+ int i, ret;
+
+ for (i = 0; i < mgr->n_sects; i++) {
+ if (!strncmp(sect->id, id, sizeof(sect->id)))
+ goto found;
+
+ sect = (struct adf_mstate_sect_h *)(sect->state + sect->size);
+ }
+
+ return NULL;
+
+found:
+ adf_mstate_dump_sect(sect, "Found");
+
+ adf_mstate_mgr_init_from_psect(&sub_sects_mgr, sect);
+ if (sect->sub_sects && adf_mstate_sect_validate(&sub_sects_mgr))
+ return NULL;
+
+ if (!action)
+ return sect;
+
+ ret = (*action)(&sub_sects_mgr, sect->state, sect->size, opaque);
+ if (ret)
+ return NULL;
+
+ return sect;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h
new file mode 100644
index 000000000000..81d263a596c5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+
+#ifndef ADF_MSTATE_MGR_H
+#define ADF_MSTATE_MGR_H
+
+#define ADF_MSTATE_ID_LEN 8
+
+#define ADF_MSTATE_ETRB_IDS "ETRBAR"
+#define ADF_MSTATE_MISCB_IDS "MISCBAR"
+#define ADF_MSTATE_EXTB_IDS "EXTBAR"
+#define ADF_MSTATE_GEN_IDS "GENER"
+#define ADF_MSTATE_CONFIG_IDS "CONFIG"
+#define ADF_MSTATE_SECTION_NUM 5
+
+#define ADF_MSTATE_BANK_IDX_IDS "bnk"
+
+#define ADF_MSTATE_ETR_REGS_IDS "mregs"
+#define ADF_MSTATE_VINTSRC_IDS "visrc"
+#define ADF_MSTATE_VINTMSK_IDS "vimsk"
+#define ADF_MSTATE_SLA_IDS "sla"
+#define ADF_MSTATE_IOV_INIT_IDS "iovinit"
+#define ADF_MSTATE_COMPAT_VER_IDS "compver"
+#define ADF_MSTATE_GEN_CAP_IDS "gencap"
+#define ADF_MSTATE_GEN_SVCMAP_IDS "svcmap"
+#define ADF_MSTATE_GEN_EXTDC_IDS "extdc"
+#define ADF_MSTATE_VINTSRC_PF2VM_IDS "vispv"
+#define ADF_MSTATE_VINTMSK_PF2VM_IDS "vimpv"
+#define ADF_MSTATE_VM2PF_IDS "vm2pf"
+#define ADF_MSTATE_PF2VM_IDS "pf2vm"
+
+struct adf_mstate_mgr {
+ u8 *buf;
+ u8 *state;
+ u32 size;
+ u32 n_sects;
+};
+
+struct adf_mstate_preh {
+ u32 magic;
+ u32 version;
+ u16 preh_len;
+ u16 n_sects;
+ u32 size;
+};
+
+struct adf_mstate_vreginfo {
+ void *addr;
+ u32 size;
+};
+
+struct adf_mstate_sect_h;
+
+typedef int (*adf_mstate_preamble_checker)(struct adf_mstate_preh *preamble, void *opa);
+typedef int (*adf_mstate_populate)(struct adf_mstate_mgr *sub_mgr, u8 *buf,
+ u32 size, void *opa);
+typedef int (*adf_mstate_action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size,
+ void *opa);
+
+struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size);
+void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr);
+void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size);
+void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_mgr *p_mgr);
+void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr,
+ struct adf_mstate_sect_h *p_sect);
+int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr,
+ u8 *buf, u32 size,
+ adf_mstate_preamble_checker checker,
+ void *opaque);
+struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr);
+int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr);
+u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr);
+u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr);
+void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr,
+ struct adf_mstate_mgr *curr_mgr,
+ struct adf_mstate_sect_h *sect);
+struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr,
+ const char *id,
+ struct adf_mstate_vreginfo *info);
+struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_populate populate,
+ void *opaque);
+struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr,
+ const char *id,
+ adf_mstate_action action,
+ void *opaque);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index 9ab93fbfefde..b9b5e744a3f1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
@@ -242,13 +242,7 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
"VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- if (vf_compat_ver == 0)
- compat = ADF_PF2VF_VF_INCOMPATIBLE;
- else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
- compat = ADF_PF2VF_VF_COMPATIBLE;
- else
- compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
-
+ compat = adf_vf_compat_checker(vf_compat_ver);
vf_info->vf_compat_ver = vf_compat_ver;
resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
index 2be048e2287b..1a044297d873 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
@@ -28,4 +28,15 @@ u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg
struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
const struct pfvf_csr_format *fmt);
+static inline u8 adf_vf_compat_checker(u8 vf_compat_ver)
+{
+ if (vf_compat_ver == 0)
+ return ADF_PF2VF_VF_INCOMPATIBLE;
+
+ if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
+ return ADF_PF2VF_VF_COMPATIBLE;
+
+ return ADF_PF2VF_VF_COMPAT_UNKNOWN;
+}
+
#endif /* ADF_PFVF_UTILS_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index d4f2db3c53d8..346ef8bee99d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -183,14 +183,14 @@ static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_s
}
/**
- * get_sla_arr_of_type() - Returns a pointer to SLA type specific array
+ * adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array
* @rl_data: pointer to ratelimiting data
* @type: SLA type
* @sla_arr: pointer to variable where requested pointer will be stored
*
* Return: Max number of elements allowed for the returned array
*/
-static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
+u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
struct rl_sla ***sla_arr)
{
switch (type) {
@@ -778,7 +778,7 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
rp_in_use[sla->ring_pairs_ids[i]] = false;
update_budget(sla, old_cir, true);
- get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
+ adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
assign_node_to_parent(rl_data->accel_dev, sla, true);
adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type);
mark_rps_usage(sla, rl_data->rp_in_use, false);
@@ -875,7 +875,7 @@ static int add_update_sla(struct adf_accel_dev *accel_dev,
if (!is_update) {
mark_rps_usage(sla, rl_data->rp_in_use, true);
- get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
+ adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
sla_type_arr[sla->node_id] = sla;
rl_data->sla[sla->sla_id] = sla;
}
@@ -1065,7 +1065,7 @@ void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default)
/* Unregister and remove all SLAs */
for (j = RL_LEAF; j >= end_type; j--) {
- max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr);
+ max_id = adf_rl_get_sla_arr_of_type(rl_data, j, &sla_type_arr);
for (i = 0; i < max_id; i++) {
if (!sla_type_arr[i])
@@ -1125,7 +1125,7 @@ int adf_rl_start(struct adf_accel_dev *accel_dev)
}
if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) {
- dev_info(&GET_DEV(accel_dev), "not supported\n");
+ dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n");
ret = -EOPNOTSUPP;
goto ret_free;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h
index 269c6656fb90..bfe750ea0e83 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h
@@ -151,6 +151,8 @@ struct rl_sla {
u16 ring_pairs_cnt;
};
+u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
+ struct rl_sla ***sla_arr);
int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
struct adf_rl_sla_input_data *sla_in);
int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index 87a70c00c41e..8d645e7e04aa 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -26,10 +26,12 @@ static void adf_iov_send_resp(struct work_struct *work)
u32 vf_nr = vf_info->vf_nr;
bool ret;
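+ /* Serialize VF2PF handling with a concurrent migration state save */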
+ mutex_lock(&vf_info->pfvf_mig_lock);
ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
if (ret)
/* re-enable interrupt on PF from this VF */
adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
+ mutex_unlock(&vf_info->pfvf_mig_lock);
kfree(pf2vf_resp);
}
@@ -62,6 +64,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
vf_info->vf_nr = i;
mutex_init(&vf_info->pf2vf_lock);
+ mutex_init(&vf_info->pfvf_mig_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
ADF_VF2PF_RATELIMIT_INTERVAL,
ADF_VF2PF_RATELIMIT_BURST);
@@ -138,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
if (hw_data->configure_iov_threads)
hw_data->configure_iov_threads(accel_dev, false);
- for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
+ for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
mutex_destroy(&vf->pf2vf_lock);
+ mutex_destroy(&vf->pfvf_mig_lock);
+ }
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
kfree(accel_dev->pf.vf_info);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
index 2ff714d11bd2..74fb0c2ed241 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -41,6 +41,20 @@ static int validate_tl_data(struct adf_tl_hw_data *tl_data)
return 0;
}
+static int validate_tl_slice_counters(struct icp_qat_fw_init_admin_slice_cnt *slice_count,
+ u8 max_slices_per_type)
+{
+ u8 *sl_counter = (u8 *)slice_count;
+ int i;
+
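+ /* Reject firmware-reported slice counts above the per-type maximum */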
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ if (sl_counter[i] > max_slices_per_type)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
{
struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
@@ -214,6 +228,13 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
return ret;
}
+ ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt);
+ if (ret) {
+ dev_err(dev, "invalid value returned by FW\n");
+ adf_send_admin_tl_stop(accel_dev);
+ return ret;
+ }
+
telemetry->hbuffs = state;
atomic_set(&telemetry->state, state);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
index 9be81cd3b886..e54a406cc1b4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -40,6 +40,7 @@ struct adf_tl_hw_data {
u8 num_dev_counters;
u8 num_rp_counters;
u8 max_rp;
+ u8 max_sl_cnt;
};
struct adf_telemetry {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c
index 630d0483c4e0..1efdf46490f1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c
@@ -474,7 +474,6 @@ err:
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *etr_data;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr_addr;
u32 size;
u32 num_banks = 0;
@@ -495,8 +494,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
}
accel_dev->transport = etr_data;
- i = hw_data->get_etr_bar_id(hw_data);
- csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+ csr_addr = adf_get_etr_base(accel_dev);
/* accel_dev->debugfs_dir should always be non-NULL here */
etr_data->debug = debugfs_create_dir("transport",
diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
index 4128200a9032..85c682e248fb 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
@@ -110,6 +110,8 @@ struct qat_dh_ctx {
unsigned int p_size;
bool g2;
struct qat_crypto_instance *inst;
+ struct crypto_kpp *ftfm;
+ bool fallback;
} __packed __aligned(64);
struct qat_asym_request {
@@ -381,6 +383,36 @@ unmap_src:
return ret;
}
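+/*
+ * DH parameter lengths the hardware cannot handle are delegated to a
+ * software fallback transform allocated in qat_dh_init_tfm().
+ */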
+static int qat_dh_generate_public_key(struct kpp_request *req)
+{
+ struct kpp_request *nreq = kpp_request_ctx(req);
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ kpp_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_kpp_generate_public_key(nreq);
+ }
+
+ return qat_dh_compute_value(req);
+}
+
+static int qat_dh_compute_shared_secret(struct kpp_request *req)
+{
+ struct kpp_request *nreq = kpp_request_ctx(req);
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ kpp_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_kpp_compute_shared_secret(nreq);
+ }
+
+ return qat_dh_compute_value(req);
+}
+
static int qat_dh_check_params_length(unsigned int p_len)
{
switch (p_len) {
@@ -398,9 +430,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- if (qat_dh_check_params_length(params->p_size << 3))
- return -EINVAL;
-
ctx->p_size = params->p_size;
ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
if (!ctx->p)
@@ -454,6 +483,13 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
if (crypto_dh_decode_key(buf, len, &params) < 0)
return -EINVAL;
+ if (qat_dh_check_params_length(params.p_size << 3)) {
+ ctx->fallback = true;
+ return crypto_kpp_set_secret(ctx->ftfm, buf, len);
+ }
+
+ ctx->fallback = false;
+
/* Free old secret if any */
qat_dh_clear_ctx(dev, ctx);
@@ -481,6 +517,9 @@ static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ if (ctx->fallback)
+ return crypto_kpp_maxsize(ctx->ftfm);
+
return ctx->p_size;
}
@@ -489,11 +528,22 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm)
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct qat_crypto_instance *inst =
qat_crypto_get_instance_node(numa_node_id());
+ const char *alg = kpp_alg_name(tfm);
+ unsigned int reqsize;
if (!inst)
return -EINVAL;
- kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+ ctx->ftfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->ftfm))
+ return PTR_ERR(ctx->ftfm);
+
+ crypto_kpp_set_flags(ctx->ftfm, crypto_kpp_get_flags(tfm));
+
+ reqsize = max(sizeof(struct qat_asym_request) + 64,
+ sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->ftfm));
+
+ kpp_set_reqsize(tfm, reqsize);
ctx->p_size = 0;
ctx->g2 = false;
@@ -506,6 +556,9 @@ static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ if (ctx->ftfm)
+ crypto_free_kpp(ctx->ftfm);
+
qat_dh_clear_ctx(dev, ctx);
qat_crypto_put_instance(ctx->inst);
}
@@ -1265,8 +1318,8 @@ static struct akcipher_alg rsa = {
static struct kpp_alg dh = {
.set_secret = qat_dh_set_secret,
- .generate_public_key = qat_dh_compute_value,
- .compute_shared_secret = qat_dh_compute_value,
+ .generate_public_key = qat_dh_generate_public_key,
+ .compute_shared_secret = qat_dh_compute_shared_secret,
.max_size = qat_dh_max_size,
.init = qat_dh_init_tfm,
.exit = qat_dh_exit_tfm,
@@ -1276,6 +1329,7 @@ static struct kpp_alg dh = {
.cra_priority = 1000,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct qat_dh_ctx),
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
};
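The hunks above route DH operations whose prime size the hardware cannot handle to a software kpp transform. A minimal sketch of the forwarding idiom, assuming a hypothetical ctx type with a preallocated fallback tfm (the request context is sized to also hold the fallback's kpp_request):

	static int fwd_generate_public_key(struct kpp_request *req)
	{
		struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
		struct my_ctx *ctx = kpp_tfm_ctx(tfm);		/* hypothetical ctx type */
		struct kpp_request *subreq = kpp_request_ctx(req);

		if (!ctx->fallback)
			return my_hw_compute(req);	/* hypothetical hardware path */

		/* Reuse src/dst/callback from the original request, retarget the tfm. */
		memcpy(subreq, req, sizeof(*req));
		kpp_request_set_tfm(subreq, ctx->ftfm);
		return crypto_kpp_generate_public_key(subreq);
	}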
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 76baed0a76c0..338acf29c487 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -81,7 +81,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (unlikely(!bufl))
return -ENOMEM;
} else {
- bufl = &buf->sgl_src.sgl_hdr;
+ bufl = container_of(&buf->sgl_src.sgl_hdr,
+ struct qat_alg_buf_list, hdr);
memset(bufl, 0, sizeof(struct qat_alg_buf_list));
buf->sgl_src_valid = true;
}
@@ -139,7 +140,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (unlikely(!buflout))
goto err_in;
} else {
- buflout = &buf->sgl_dst.sgl_hdr;
+ buflout = container_of(&buf->sgl_dst.sgl_hdr,
+ struct qat_alg_buf_list, hdr);
memset(buflout, 0, sizeof(struct qat_alg_buf_list));
buf->sgl_dst_valid = true;
}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
index d87e4f35ac39..85bc32a9ec0e 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h
@@ -15,14 +15,17 @@ struct qat_alg_buf {
} __packed;
struct qat_alg_buf_list {
- u64 resrvd;
- u32 num_bufs;
- u32 num_mapped_bufs;
+ /* New members must be added within the __struct_group() macro below. */
+ __struct_group(qat_alg_buf_list_hdr, hdr, __packed,
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
+ );
struct qat_alg_buf buffers[];
} __packed;
struct qat_alg_fixed_buf_list {
- struct qat_alg_buf_list sgl_hdr;
+ struct qat_alg_buf_list_hdr sgl_hdr;
struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
} __packed __aligned(64);
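The __struct_group() change above lets qat_alg_fixed_buf_list embed only the header members while __qat_bl_sgl_to_bufl() recovers the flexible-array view with container_of(). A minimal sketch of the pattern (names hypothetical; struct_group helpers are from include/uapi/linux/stddef.h):

	struct example {
		/* Emits both anonymous members and a mirror type struct example_hdr. */
		struct_group_tagged(example_hdr, hdr,
			u32 magic;
			u32 count;
		);
		u8 payload[];	/* flexible array, excluded from the header type */
	};

	struct fixed_example {
		struct example_hdr hdr;		/* header only, fixed size */
		u8 slots[64];
	};

	/* Recover the flexible-array view of the embedded header: */
	struct example *e = container_of(&f->hdr, struct example, hdr);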
diff --git a/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c
new file mode 100644
index 000000000000..892c2283a50e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation */
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/qat/qat_mig_dev.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id)
+{
+ struct adf_accel_dev *accel_dev;
+ struct qat_migdev_ops *ops;
+ struct qat_mig_dev *mdev;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ if (!accel_dev)
+ return ERR_PTR(-ENODEV);
+
+ ops = GET_VFMIG_OPS(accel_dev);
+ if (!ops || !ops->init || !ops->cleanup || !ops->reset || !ops->open ||
+ !ops->close || !ops->suspend || !ops->resume || !ops->save_state ||
+ !ops->load_state || !ops->save_setup || !ops->load_setup)
+ return ERR_PTR(-EINVAL);
+
+ mdev = kmalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return ERR_PTR(-ENOMEM);
+
+ mdev->vf_id = vf_id;
+ mdev->parent_accel_dev = accel_dev;
+
+ return mdev;
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_create);
+
+int qat_vfmig_init(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->init(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_init);
+
+void qat_vfmig_cleanup(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->cleanup(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_cleanup);
+
+void qat_vfmig_reset(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->reset(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_reset);
+
+int qat_vfmig_open(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->open(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_open);
+
+void qat_vfmig_close(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ GET_VFMIG_OPS(accel_dev)->close(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_close);
+
+int qat_vfmig_suspend(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->suspend(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_suspend);
+
+int qat_vfmig_resume(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->resume(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_resume);
+
+int qat_vfmig_save_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->save_state(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_save_state);
+
+int qat_vfmig_save_setup(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->save_setup(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_save_setup);
+
+int qat_vfmig_load_state(struct qat_mig_dev *mdev)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->load_state(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_load_state);
+
+int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size)
+{
+ struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;
+
+ return GET_VFMIG_OPS(accel_dev)->load_setup(mdev, size);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_load_setup);
+
+void qat_vfmig_destroy(struct qat_mig_dev *mdev)
+{
+ kfree(mdev);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_destroy);
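qat_mig_dev.c is a thin exported shim: the ops table is validated once in qat_vfmig_create(), so the per-operation wrappers can dispatch unconditionally. A hedged sketch of the expected caller sequence (a VFIO migration driver; error handling elided):

	struct qat_mig_dev *mdev = qat_vfmig_create(pdev, vf_id);

	if (IS_ERR(mdev))
		return PTR_ERR(mdev);
	qat_vfmig_init(mdev);
	qat_vfmig_open(mdev);
	/* ... on migration: quiesce the VF, then capture its state ... */
	qat_vfmig_suspend(mdev);
	qat_vfmig_save_setup(mdev);
	qat_vfmig_save_state(mdev);
	/* ... teardown ... */
	qat_vfmig_close(mdev);
	qat_vfmig_cleanup(mdev);
	qat_vfmig_destroy(mdev);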
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index af14090cc4be..6e24d57e6b98 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -5,6 +5,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_dh895xcc_hw_data.h"
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index 70e56cc16ece..f4ee4c2e00da 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -4,6 +4,7 @@
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include <adf_pfvf_vf_msg.h>
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 79b4e74804f6..6bfc59e67747 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -138,6 +138,10 @@ int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
return -ENOMEM;
cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, cptr_dma)) {
+ kfree(hctx);
+ return -ENOMEM;
+ }
cn10k_cpt_hw_ctx_set(hctx, 1);
er_ctx->hw_ctx = hctx;
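The fix above follows the general DMA API rule: a dma_map_single() result must be checked with dma_mapping_error() before use, and the backing buffer released on failure. A minimal sketch of the pattern:

	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr)) {
		kfree(buf);
		return -ENOMEM;
	}
	/* ... hand addr to the device ..., then: */
	dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);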
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 2b3ebe0db3a6..057d73c370b7 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>
+#include <soc/fsl/dcp.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
@@ -101,6 +102,7 @@ struct dcp_async_ctx {
struct crypto_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
+ bool key_referenced;
};
struct dcp_aes_req_ctx {
@@ -155,6 +157,7 @@ static struct dcp *global_sdcp;
#define MXS_DCP_CONTROL0_HASH_TERM (1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT (1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY (1 << 11)
+#define MXS_DCP_CONTROL0_OTP_KEY (1 << 10)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT (1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT (1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH (1 << 6)
@@ -168,6 +171,8 @@ static struct dcp *global_sdcp;
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB (0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 (0 << 0)
+#define MXS_DCP_CONTROL1_KEY_SELECT_SHIFT 8
+
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
int dma_err;
@@ -224,13 +229,16 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ bool key_referenced = actx->key_referenced;
int ret;
- key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
- 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
- ret = dma_mapping_error(sdcp->dev, key_phys);
- if (ret)
- return ret;
+ if (!key_referenced) {
+ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+ 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
+ ret = dma_mapping_error(sdcp->dev, key_phys);
+ if (ret)
+ return ret;
+ }
src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -255,8 +263,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
MXS_DCP_CONTROL0_INTERRUPT |
MXS_DCP_CONTROL0_ENABLE_CIPHER;
- /* Payload contains the key. */
- desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+ if (key_referenced)
+ /* Set OTP key bit to select the key via KEY_SELECT. */
+ desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
+ else
+ /* Payload contains the key. */
+ desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
if (rctx->enc)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
@@ -270,6 +282,9 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
else
desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
+ if (key_referenced)
+ desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT;
+
desc->next_cmd_addr = 0;
desc->source = src_phys;
desc->destination = dst_phys;
@@ -284,9 +299,9 @@ aes_done_run:
err_dst:
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
- dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
- DMA_TO_DEVICE);
-
+ if (!key_referenced)
+ dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
return ret;
}
@@ -453,7 +468,7 @@ static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
- if (unlikely(actx->key_len != AES_KEYSIZE_128))
+ if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced))
return mxs_dcp_block_fallback(req, enc);
rctx->enc = enc;
@@ -500,6 +515,7 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
* there can still be an operation in progress.
*/
actx->key_len = len;
+ actx->key_referenced = false;
if (len == AES_KEYSIZE_128) {
memcpy(actx->key, key, len);
return 0;
@@ -516,6 +532,32 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
return crypto_skcipher_setkey(actx->fallback, key, len);
}
+static int mxs_dcp_aes_setrefkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
+
+ if (len != DCP_PAES_KEYSIZE)
+ return -EINVAL;
+
+ switch (key[0]) {
+ case DCP_PAES_KEY_SLOT0:
+ case DCP_PAES_KEY_SLOT1:
+ case DCP_PAES_KEY_SLOT2:
+ case DCP_PAES_KEY_SLOT3:
+ case DCP_PAES_KEY_UNIQUE:
+ case DCP_PAES_KEY_OTP:
+ memcpy(actx->key, key, len);
+ actx->key_len = len;
+ actx->key_referenced = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
@@ -539,6 +581,13 @@ static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
crypto_free_skcipher(actx->fallback);
}
+static int mxs_dcp_paes_init_tfm(struct crypto_skcipher *tfm)
+{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
+
+ return 0;
+}
+
/*
* Hashing (SHA1/SHA256)
*/
@@ -889,6 +938,39 @@ static struct skcipher_alg dcp_aes_algs[] = {
.ivsize = AES_BLOCK_SIZE,
.init = mxs_dcp_aes_fallback_init_tfm,
.exit = mxs_dcp_aes_fallback_exit_tfm,
+ }, {
+ .base.cra_name = "ecb(paes)",
+ .base.cra_driver_name = "ecb-paes-dcp",
+ .base.cra_priority = 401,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DCP_PAES_KEYSIZE,
+ .max_keysize = DCP_PAES_KEYSIZE,
+ .setkey = mxs_dcp_aes_setrefkey,
+ .encrypt = mxs_dcp_aes_ecb_encrypt,
+ .decrypt = mxs_dcp_aes_ecb_decrypt,
+ .init = mxs_dcp_paes_init_tfm,
+ }, {
+ .base.cra_name = "cbc(paes)",
+ .base.cra_driver_name = "cbc-paes-dcp",
+ .base.cra_priority = 401,
+ .base.cra_alignmask = 15,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct dcp_async_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DCP_PAES_KEYSIZE,
+ .max_keysize = DCP_PAES_KEYSIZE,
+ .setkey = mxs_dcp_aes_setrefkey,
+ .encrypt = mxs_dcp_aes_cbc_encrypt,
+ .decrypt = mxs_dcp_aes_cbc_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .init = mxs_dcp_paes_init_tfm,
},
};
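The new ecb(paes)/cbc(paes) algorithms take a key reference rather than key material: the blob passed to setkey selects a hardware key slot, and mxs_dcp_run_aes() programs that selector into the CONTROL1 KEY_SELECT field with the OTP_KEY bit set. A hedged usage sketch, assuming the DCP_PAES_* selectors from soc/fsl/dcp.h with the slot id carried in the first byte:

	u8 key_ref[DCP_PAES_KEYSIZE] = { DCP_PAES_KEY_OTP };
	/* Alternatives: DCP_PAES_KEY_SLOT0..3, DCP_PAES_KEY_UNIQUE */

	crypto_skcipher_setkey(tfm, key_ref, sizeof(key_ref));
	/* Subsequent encrypt/decrypt requests use the referenced hardware key. */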
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 2ab90ec10e61..82214cde2bcd 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -251,7 +251,9 @@ int nx842_crypto_compress(struct crypto_tfm *tfm,
u8 *dst, unsigned int *dlen)
{
struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_crypto_header *hdr = &ctx->header;
+ struct nx842_crypto_header *hdr =
+ container_of(&ctx->header,
+ struct nx842_crypto_header, hdr);
struct nx842_crypto_param p;
struct nx842_constraints c = *ctx->driver->constraints;
unsigned int groups, hdrsize, h;
@@ -490,7 +492,7 @@ int nx842_crypto_decompress(struct crypto_tfm *tfm,
}
memcpy(&ctx->header, src, hdr_len);
- hdr = &ctx->header;
+ hdr = container_of(&ctx->header, struct nx842_crypto_header, hdr);
for (n = 0; n < hdr->groups; n++) {
/* ignore applies to last group */
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index 7590bfb24d79..25fa70b2112c 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -157,9 +157,11 @@ struct nx842_crypto_header_group {
} __packed;
struct nx842_crypto_header {
- __be16 magic; /* NX842_CRYPTO_MAGIC */
- __be16 ignore; /* decompressed end bytes to ignore */
- u8 groups; /* total groups in this header */
+ struct_group_tagged(nx842_crypto_header_hdr, hdr,
+ __be16 magic; /* NX842_CRYPTO_MAGIC */
+ __be16 ignore; /* decompressed end bytes to ignore */
+ u8 groups; /* total groups in this header */
+ );
struct nx842_crypto_header_group group[];
} __packed;
@@ -171,7 +173,7 @@ struct nx842_crypto_ctx {
u8 *wmem;
u8 *sbounce, *dbounce;
- struct nx842_crypto_header header;
+ struct nx842_crypto_header_hdr header;
struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
struct nx842_driver *driver;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 3423b5cde1c7..96d4af5d48a6 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -559,7 +559,7 @@ static int sahara_aes_process(struct skcipher_request *req)
struct sahara_ctx *ctx;
struct sahara_aes_reqctx *rctx;
int ret;
- unsigned long timeout;
+ unsigned long time_left;
/* Request is ready to be dispatched by the device */
dev_dbg(dev->device,
@@ -597,15 +597,15 @@ static int sahara_aes_process(struct skcipher_request *req)
if (ret)
return -EINVAL;
- timeout = wait_for_completion_timeout(&dev->dma_completion,
- msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
- if (!timeout) {
+ if (!time_left) {
dev_err(dev->device, "AES timeout\n");
return -ETIMEDOUT;
}
@@ -931,7 +931,7 @@ static int sahara_sha_process(struct ahash_request *req)
struct sahara_dev *dev = dev_ptr;
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
int ret;
- unsigned long timeout;
+ unsigned long time_left;
ret = sahara_sha_prepare_request(req);
if (!ret)
@@ -963,14 +963,14 @@ static int sahara_sha_process(struct ahash_request *req)
sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
- timeout = wait_for_completion_timeout(&dev->dma_completion,
- msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
if (rctx->sg_in_idx)
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
- if (!timeout) {
+ if (!time_left) {
dev_err(dev->device, "SHA timeout\n");
return -ETIMEDOUT;
}
diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig
index cb59357b58b2..0fe389e9f932 100644
--- a/drivers/crypto/starfive/Kconfig
+++ b/drivers/crypto/starfive/Kconfig
@@ -14,6 +14,10 @@ config CRYPTO_DEV_JH7110
select CRYPTO_RSA
select CRYPTO_AES
select CRYPTO_CCM
+ select CRYPTO_GCM
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CTR
help
Support for StarFive JH7110 crypto hardware acceleration engine.
This module provides acceleration for public key algo,
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index 1ac15cc4ef3c..86a1a1fa9f8f 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -78,7 +78,7 @@ static inline int is_gcm(struct starfive_cryp_dev *cryp)
return (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM;
}
-static inline int is_encrypt(struct starfive_cryp_dev *cryp)
+static inline bool is_encrypt(struct starfive_cryp_dev *cryp)
{
return cryp->flags & FLG_ENCRYPT;
}
@@ -103,16 +103,6 @@ static void starfive_aes_aead_hw_start(struct starfive_cryp_ctx *ctx, u32 hw_mod
}
}
-static inline void starfive_aes_set_ivlen(struct starfive_cryp_ctx *ctx)
-{
- struct starfive_cryp_dev *cryp = ctx->cryp;
-
- if (is_gcm(cryp))
- writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN);
- else
- writel(AES_BLOCK_SIZE, cryp->base + STARFIVE_AES_IVLEN);
-}
-
static inline void starfive_aes_set_alen(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -261,7 +251,6 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
rctx->csr.aes.mode = hw_mode;
rctx->csr.aes.cmode = !is_encrypt(cryp);
- rctx->csr.aes.ie = 1;
rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
if (cryp->side_chan) {
@@ -279,7 +268,7 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
case STARFIVE_AES_MODE_GCM:
starfive_aes_set_alen(ctx);
starfive_aes_set_mlen(ctx);
- starfive_aes_set_ivlen(ctx);
+ writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN);
starfive_aes_aead_hw_start(ctx, hw_mode);
starfive_aes_write_iv(ctx, (void *)cryp->req.areq->iv);
break;
@@ -300,52 +289,49 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
return cryp->err;
}
-static int starfive_aes_read_authtag(struct starfive_cryp_dev *cryp)
+static int starfive_aes_read_authtag(struct starfive_cryp_ctx *ctx)
{
- int i, start_addr;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ int i;
if (starfive_aes_wait_busy(cryp))
return dev_err_probe(cryp->dev, -ETIMEDOUT,
"Timeout waiting for tag generation.");
- start_addr = STARFIVE_AES_NONCE0;
-
- if (is_gcm(cryp))
- for (i = 0; i < AES_BLOCK_32; i++, start_addr += 4)
- cryp->tag_out[i] = readl(cryp->base + start_addr);
- else
+ if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM) {
+ cryp->tag_out[0] = readl(cryp->base + STARFIVE_AES_NONCE0);
+ cryp->tag_out[1] = readl(cryp->base + STARFIVE_AES_NONCE1);
+ cryp->tag_out[2] = readl(cryp->base + STARFIVE_AES_NONCE2);
+ cryp->tag_out[3] = readl(cryp->base + STARFIVE_AES_NONCE3);
+ } else {
for (i = 0; i < AES_BLOCK_32; i++)
cryp->tag_out[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
+ }
if (is_encrypt(cryp)) {
- scatterwalk_copychunks(cryp->tag_out, &cryp->out_walk, cryp->authsize, 1);
+ scatterwalk_map_and_copy(cryp->tag_out, rctx->out_sg,
+ cryp->total_in, cryp->authsize, 1);
} else {
- scatterwalk_copychunks(cryp->tag_in, &cryp->in_walk, cryp->authsize, 0);
-
if (crypto_memneq(cryp->tag_in, cryp->tag_out, cryp->authsize))
- return dev_err_probe(cryp->dev, -EBADMSG, "Failed tag verification\n");
+ return -EBADMSG;
}
return 0;
}
-static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp)
+static void starfive_aes_finish_req(struct starfive_cryp_ctx *ctx)
{
- union starfive_aes_csr csr;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
int err = cryp->err;
if (!err && cryp->authsize)
- err = starfive_aes_read_authtag(cryp);
+ err = starfive_aes_read_authtag(ctx);
if (!err && ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC ||
(cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CTR))
starfive_aes_get_iv(cryp, (void *)cryp->req.sreq->iv);
- /* reset irq flags*/
- csr.v = 0;
- csr.aesrst = 1;
- writel(csr.v, cryp->base + STARFIVE_AES_CSR);
-
if (cryp->authsize)
crypto_finalize_aead_request(cryp->engine, cryp->req.areq, err);
else
@@ -353,39 +339,6 @@ static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp)
err);
}
-void starfive_aes_done_task(unsigned long param)
-{
- struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
- u32 block[AES_BLOCK_32];
- u32 stat;
- int i;
-
- for (i = 0; i < AES_BLOCK_32; i++)
- block[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
-
- scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_out), 1);
-
- cryp->total_out -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_out);
-
- if (!cryp->total_out) {
- starfive_aes_finish_req(cryp);
- return;
- }
-
- memset(block, 0, AES_BLOCK_SIZE);
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_in), 0);
- cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
-
- for (i = 0; i < AES_BLOCK_32; i++)
- writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_AES_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
-}
-
static int starfive_aes_gcm_write_adata(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -451,60 +404,165 @@ static int starfive_aes_ccm_write_adata(struct starfive_cryp_ctx *ctx)
return 0;
}
-static int starfive_aes_prepare_req(struct skcipher_request *req,
- struct aead_request *areq)
+static void starfive_aes_dma_done(void *param)
{
- struct starfive_cryp_ctx *ctx;
- struct starfive_cryp_request_ctx *rctx;
- struct starfive_cryp_dev *cryp;
+ struct starfive_cryp_dev *cryp = param;
- if (!req && !areq)
- return -EINVAL;
+ complete(&cryp->dma_done);
+}
- ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
- crypto_aead_ctx(crypto_aead_reqtfm(areq));
+static void starfive_aes_dma_init(struct starfive_cryp_dev *cryp)
+{
+ cryp->cfg_in.direction = DMA_MEM_TO_DEV;
+ cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
+ cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_in.src_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_maxburst = cryp->dma_maxburst;
+ cryp->cfg_in.dst_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET;
- cryp = ctx->cryp;
- rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
+ dmaengine_slave_config(cryp->tx, &cryp->cfg_in);
- if (req) {
- cryp->req.sreq = req;
- cryp->total_in = req->cryptlen;
- cryp->total_out = req->cryptlen;
- cryp->assoclen = 0;
- cryp->authsize = 0;
- } else {
- cryp->req.areq = areq;
- cryp->assoclen = areq->assoclen;
- cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
- if (is_encrypt(cryp)) {
- cryp->total_in = areq->cryptlen;
- cryp->total_out = areq->cryptlen;
- } else {
- cryp->total_in = areq->cryptlen - cryp->authsize;
- cryp->total_out = cryp->total_in;
- }
- }
+ cryp->cfg_out.direction = DMA_DEV_TO_MEM;
+ cryp->cfg_out.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cryp->cfg_out.dst_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
+ cryp->cfg_out.src_maxburst = 4;
+ cryp->cfg_out.dst_maxburst = 4;
+ cryp->cfg_out.src_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET;
- rctx->in_sg = req ? req->src : areq->src;
- scatterwalk_start(&cryp->in_walk, rctx->in_sg);
+ dmaengine_slave_config(cryp->rx, &cryp->cfg_out);
- rctx->out_sg = req ? req->dst : areq->dst;
- scatterwalk_start(&cryp->out_walk, rctx->out_sg);
+ init_completion(&cryp->dma_done);
+}
- if (cryp->assoclen) {
- rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
- if (!rctx->adata)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "Failed to alloc memory for adata");
+static int starfive_aes_dma_xfer(struct starfive_cryp_dev *cryp,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ int len)
+{
+ struct dma_async_tx_descriptor *in_desc, *out_desc;
+ union starfive_alg_cr alg_cr;
+ int ret = 0, in_save, out_save;
+
+ alg_cr.v = 0;
+ alg_cr.start = 1;
+ alg_cr.aes_dma_en = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ in_save = sg_dma_len(src);
+ out_save = sg_dma_len(dst);
- scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0);
- scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->assoclen, 2);
+ writel(ALIGN(len, AES_BLOCK_SIZE), cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
+ writel(ALIGN(len, AES_BLOCK_SIZE), cryp->base + STARFIVE_DMA_OUT_LEN_OFFSET);
+
+ sg_dma_len(src) = ALIGN(len, AES_BLOCK_SIZE);
+ sg_dma_len(dst) = ALIGN(len, AES_BLOCK_SIZE);
+
+ out_desc = dmaengine_prep_slave_sg(cryp->rx, dst, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!out_desc) {
+ ret = -EINVAL;
+ goto dma_err;
}
- ctx->rctx = rctx;
+ out_desc->callback = starfive_aes_dma_done;
+ out_desc->callback_param = cryp;
+
+ reinit_completion(&cryp->dma_done);
+ dmaengine_submit(out_desc);
+ dma_async_issue_pending(cryp->rx);
+
+ in_desc = dmaengine_prep_slave_sg(cryp->tx, src, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc) {
+ ret = -EINVAL;
+ goto dma_err;
+ }
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(cryp->tx);
+
+ if (!wait_for_completion_timeout(&cryp->dma_done,
+ msecs_to_jiffies(1000)))
+ ret = -ETIMEDOUT;
+
+dma_err:
+ sg_dma_len(src) = in_save;
+ sg_dma_len(dst) = out_save;
+
+ alg_cr.v = 0;
+ alg_cr.clear = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ return ret;
+}
+
+static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct scatterlist *stsg, *dtsg;
+ struct scatterlist _src[2], _dst[2];
+ unsigned int remain = cryp->total_in;
+ unsigned int len, src_nents, dst_nents;
+ int ret;
+
+ if (src == dst) {
+ for (stsg = src, dtsg = dst; remain > 0;
+ stsg = sg_next(stsg), dtsg = sg_next(dtsg)) {
+ src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
+ if (src_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg error\n");
+
+ dst_nents = src_nents;
+ len = min(sg_dma_len(stsg), remain);
+
+ ret = starfive_aes_dma_xfer(cryp, stsg, dtsg, len);
+ dma_unmap_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
+ if (ret)
+ return ret;
+
+ remain -= len;
+ }
+ } else {
+ for (stsg = src, dtsg = dst;;) {
+ src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
+ if (src_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg src error\n");
+
+ dst_nents = dma_map_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
+ if (dst_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg dst error\n");
+
+ len = min(sg_dma_len(stsg), sg_dma_len(dtsg));
+ len = min(len, remain);
+
+ ret = starfive_aes_dma_xfer(cryp, stsg, dtsg, len);
+ dma_unmap_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ remain -= len;
+ if (remain == 0)
+ break;
+
+ if (sg_dma_len(stsg) - len) {
+ stsg = scatterwalk_ffwd(_src, stsg, len);
+ dtsg = sg_next(dtsg);
+ } else if (sg_dma_len(dtsg) - len) {
+ dtsg = scatterwalk_ffwd(_dst, dtsg, len);
+ stsg = sg_next(stsg);
+ } else {
+ stsg = sg_next(stsg);
+ dtsg = sg_next(dtsg);
+ }
+ }
+ }
- return starfive_aes_hw_init(ctx);
+ return 0;
}
static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq)
@@ -513,35 +571,42 @@ static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq)
container_of(areq, struct skcipher_request, base);
struct starfive_cryp_ctx *ctx =
crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct starfive_cryp_request_ctx *rctx = skcipher_request_ctx(req);
struct starfive_cryp_dev *cryp = ctx->cryp;
- u32 block[AES_BLOCK_32];
- u32 stat;
- int err;
- int i;
+ int ret;
- err = starfive_aes_prepare_req(req, NULL);
- if (err)
- return err;
+ cryp->req.sreq = req;
+ cryp->total_in = req->cryptlen;
+ cryp->total_out = req->cryptlen;
+ cryp->assoclen = 0;
+ cryp->authsize = 0;
- /*
- * Write first plain/ciphertext block to start the module
- * then let irq tasklet handle the rest of the data blocks.
- */
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_in), 0);
- cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+ rctx->in_sg = req->src;
+ rctx->out_sg = req->dst;
+
+ ctx->rctx = rctx;
+
+ ret = starfive_aes_hw_init(ctx);
+ if (ret)
+ return ret;
- for (i = 0; i < AES_BLOCK_32; i++)
- writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+ if (!cryp->total_in)
+ goto finish_req;
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_AES_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ starfive_aes_dma_init(cryp);
+
+ ret = starfive_aes_map_sg(cryp, rctx->in_sg, rctx->out_sg);
+ if (ret)
+ return ret;
+
+finish_req:
+ starfive_aes_finish_req(ctx);
return 0;
}
-static int starfive_aes_init_tfm(struct crypto_skcipher *tfm)
+static int starfive_aes_init_tfm(struct crypto_skcipher *tfm,
+ const char *alg_name)
{
struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -549,12 +614,26 @@ static int starfive_aes_init_tfm(struct crypto_skcipher *tfm)
if (!ctx->cryp)
return -ENODEV;
+ ctx->skcipher_fbk = crypto_alloc_skcipher(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->skcipher_fbk))
+ return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->skcipher_fbk),
+ "%s() failed to allocate fallback for %s\n",
+ __func__, alg_name);
+
crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
- sizeof(struct skcipher_request));
+ crypto_skcipher_reqsize(ctx->skcipher_fbk));
return 0;
}
+static void starfive_aes_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->skcipher_fbk);
+}
+
static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq)
{
struct aead_request *req =
@@ -562,79 +641,99 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
struct starfive_cryp_ctx *ctx =
crypto_aead_ctx(crypto_aead_reqtfm(req));
struct starfive_cryp_dev *cryp = ctx->cryp;
- struct starfive_cryp_request_ctx *rctx;
- u32 block[AES_BLOCK_32];
- u32 stat;
- int err;
- int i;
+ struct starfive_cryp_request_ctx *rctx = aead_request_ctx(req);
+ struct scatterlist _src[2], _dst[2];
+ int ret;
+
+ cryp->req.areq = req;
+ cryp->assoclen = req->assoclen;
+ cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ rctx->in_sg = scatterwalk_ffwd(_src, req->src, cryp->assoclen);
+ if (req->src == req->dst)
+ rctx->out_sg = rctx->in_sg;
+ else
+ rctx->out_sg = scatterwalk_ffwd(_dst, req->dst, cryp->assoclen);
+
+ if (is_encrypt(cryp)) {
+ cryp->total_in = req->cryptlen;
+ cryp->total_out = req->cryptlen;
+ } else {
+ cryp->total_in = req->cryptlen - cryp->authsize;
+ cryp->total_out = cryp->total_in;
+ scatterwalk_map_and_copy(cryp->tag_in, req->src,
+ cryp->total_in + cryp->assoclen,
+ cryp->authsize, 0);
+ }
- err = starfive_aes_prepare_req(NULL, req);
- if (err)
- return err;
+ if (cryp->assoclen) {
+ rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
+ if (!rctx->adata)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "Failed to alloc memory for adata");
+
+ if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen),
+ rctx->adata, cryp->assoclen) != cryp->assoclen)
+ return -EINVAL;
+ }
+
+ if (cryp->total_in)
+ sg_zero_buffer(rctx->in_sg, sg_nents(rctx->in_sg),
+ sg_dma_len(rctx->in_sg) - cryp->total_in,
+ cryp->total_in);
- rctx = ctx->rctx;
+ ctx->rctx = rctx;
+
+ ret = starfive_aes_hw_init(ctx);
+ if (ret)
+ return ret;
if (!cryp->assoclen)
goto write_text;
if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM)
- cryp->err = starfive_aes_ccm_write_adata(ctx);
+ ret = starfive_aes_ccm_write_adata(ctx);
else
- cryp->err = starfive_aes_gcm_write_adata(ctx);
+ ret = starfive_aes_gcm_write_adata(ctx);
kfree(rctx->adata);
- if (cryp->err)
- return cryp->err;
+ if (ret)
+ return ret;
write_text:
if (!cryp->total_in)
goto finish_req;
- /*
- * Write first plain/ciphertext block to start the module
- * then let irq tasklet handle the rest of the data blocks.
- */
- scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
- cryp->total_in), 0);
- cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
-
- for (i = 0; i < AES_BLOCK_32; i++)
- writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_AES_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ starfive_aes_dma_init(cryp);
- return 0;
+ ret = starfive_aes_map_sg(cryp, rctx->in_sg, rctx->out_sg);
+ if (ret)
+ return ret;
finish_req:
- starfive_aes_finish_req(cryp);
+ starfive_aes_finish_req(ctx);
return 0;
}
-static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm)
+static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm,
+ const char *alg_name)
{
struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
- struct starfive_cryp_dev *cryp = ctx->cryp;
- struct crypto_tfm *aead = crypto_aead_tfm(tfm);
- struct crypto_alg *alg = aead->__crt_alg;
ctx->cryp = starfive_cryp_find_dev(ctx);
if (!ctx->cryp)
return -ENODEV;
- if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->aead_fbk = crypto_alloc_aead(alg->cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->aead_fbk))
- return dev_err_probe(cryp->dev, PTR_ERR(ctx->aead_fbk),
- "%s() failed to allocate fallback for %s\n",
- __func__, alg->cra_name);
- }
+ ctx->aead_fbk = crypto_alloc_aead(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->aead_fbk))
+ return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->aead_fbk),
+ "%s() failed to allocate fallback for %s\n",
+ __func__, alg_name);
- crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) +
- sizeof(struct aead_request));
+ crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
+ crypto_aead_reqsize(ctx->aead_fbk));
return 0;
}
@@ -646,6 +745,46 @@ static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm)
crypto_free_aead(ctx->aead_fbk);
}
+static bool starfive_aes_check_unaligned(struct starfive_cryp_dev *cryp,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct scatterlist *tsg;
+ int i;
+
+ for_each_sg(src, tsg, sg_nents(src), i)
+ if (!IS_ALIGNED(tsg->offset, sizeof(u32)) ||
+ (!IS_ALIGNED(tsg->length, AES_BLOCK_SIZE) &&
+ !sg_is_last(tsg)))
+ return true;
+
+ if (src != dst)
+ for_each_sg(dst, tsg, sg_nents(dst), i)
+ if (!IS_ALIGNED(tsg->offset, sizeof(u32)) ||
+ (!IS_ALIGNED(tsg->length, AES_BLOCK_SIZE) &&
+ !sg_is_last(tsg)))
+ return true;
+
+ return false;
+}
+
+static int starfive_aes_do_fallback(struct skcipher_request *req, bool enc)
+{
+ struct starfive_cryp_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ skcipher_request_set_tfm(subreq, ctx->skcipher_fbk);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+}
+
static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -660,32 +799,54 @@ static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags)
if (req->cryptlen & blocksize_align)
return -EINVAL;
+ if (starfive_aes_check_unaligned(cryp, req->src, req->dst))
+ return starfive_aes_do_fallback(req, is_encrypt(cryp));
+
return crypto_transfer_skcipher_request_to_engine(cryp->engine, req);
}
+static int starfive_aes_aead_do_fallback(struct aead_request *req, bool enc)
+{
+ struct starfive_cryp_ctx *ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, ctx->aead_fbk);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return enc ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
static int starfive_aes_aead_crypt(struct aead_request *req, unsigned long flags)
{
struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct scatterlist *src, *dst, _src[2], _dst[2];
cryp->flags = flags;
- /*
- * HW engine could not perform CCM tag verification on
- * non-blocksize aligned text, use fallback algo instead
+ /*
+ * The hardware cannot verify the AES-CCM tag on non-aligned text,
+ * so CCM decryption always uses the fallback.
*/
- if (ctx->aead_fbk && !is_encrypt(cryp)) {
- struct aead_request *subreq = aead_request_ctx(req);
+ if (((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM) &&
+ !is_encrypt(cryp))
+ return starfive_aes_aead_do_fallback(req, 0);
- aead_request_set_tfm(subreq, ctx->aead_fbk);
- aead_request_set_callback(subreq, req->base.flags,
- req->base.complete, req->base.data);
- aead_request_set_crypt(subreq, req->src,
- req->dst, req->cryptlen, req->iv);
- aead_request_set_ad(subreq, req->assoclen);
+ src = scatterwalk_ffwd(_src, req->src, req->assoclen);
- return crypto_aead_decrypt(subreq);
- }
+ if (req->src == req->dst)
+ dst = src;
+ else
+ dst = scatterwalk_ffwd(_dst, req->dst, req->assoclen);
+
+ if (starfive_aes_check_unaligned(cryp, src, dst))
+ return starfive_aes_aead_do_fallback(req, is_encrypt(cryp));
return crypto_transfer_aead_request_to_engine(cryp->engine, req);
}
@@ -706,7 +867,7 @@ static int starfive_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- return 0;
+ return crypto_skcipher_setkey(ctx->skcipher_fbk, key, keylen);
}
static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -725,16 +886,20 @@ static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- if (ctx->aead_fbk)
- return crypto_aead_setkey(ctx->aead_fbk, key, keylen);
-
- return 0;
+ return crypto_aead_setkey(ctx->aead_fbk, key, keylen);
}
static int starfive_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- return crypto_gcm_check_authsize(authsize);
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = crypto_gcm_check_authsize(authsize);
+ if (ret)
+ return ret;
+
+ return crypto_aead_setauthsize(ctx->aead_fbk, authsize);
}
static int starfive_aes_ccm_setauthsize(struct crypto_aead *tfm,
@@ -820,9 +985,35 @@ static int starfive_aes_ccm_decrypt(struct aead_request *req)
return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM);
}
+static int starfive_aes_ecb_init_tfm(struct crypto_skcipher *tfm)
+{
+ return starfive_aes_init_tfm(tfm, "ecb(aes-generic)");
+}
+
+static int starfive_aes_cbc_init_tfm(struct crypto_skcipher *tfm)
+{
+ return starfive_aes_init_tfm(tfm, "cbc(aes-generic)");
+}
+
+static int starfive_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
+{
+ return starfive_aes_init_tfm(tfm, "ctr(aes-generic)");
+}
+
+static int starfive_aes_ccm_init_tfm(struct crypto_aead *tfm)
+{
+ return starfive_aes_aead_init_tfm(tfm, "ccm_base(ctr(aes-generic),cbcmac(aes-generic))");
+}
+
+static int starfive_aes_gcm_init_tfm(struct crypto_aead *tfm)
+{
+ return starfive_aes_aead_init_tfm(tfm, "gcm_base(ctr(aes-generic),ghash-generic)");
+}
+
static struct skcipher_engine_alg skcipher_algs[] = {
{
- .base.init = starfive_aes_init_tfm,
+ .base.init = starfive_aes_ecb_init_tfm,
+ .base.exit = starfive_aes_exit_tfm,
.base.setkey = starfive_aes_setkey,
.base.encrypt = starfive_aes_ecb_encrypt,
.base.decrypt = starfive_aes_ecb_decrypt,
@@ -832,7 +1023,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "starfive-ecb-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -842,7 +1034,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.do_one_request = starfive_aes_do_one_req,
},
}, {
- .base.init = starfive_aes_init_tfm,
+ .base.init = starfive_aes_cbc_init_tfm,
+ .base.exit = starfive_aes_exit_tfm,
.base.setkey = starfive_aes_setkey,
.base.encrypt = starfive_aes_cbc_encrypt,
.base.decrypt = starfive_aes_cbc_decrypt,
@@ -853,7 +1046,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "starfive-cbc-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -863,7 +1057,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.do_one_request = starfive_aes_do_one_req,
},
}, {
- .base.init = starfive_aes_init_tfm,
+ .base.init = starfive_aes_ctr_init_tfm,
+ .base.exit = starfive_aes_exit_tfm,
.base.setkey = starfive_aes_setkey,
.base.encrypt = starfive_aes_ctr_encrypt,
.base.decrypt = starfive_aes_ctr_decrypt,
@@ -874,7 +1069,8 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "starfive-ctr-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -892,7 +1088,7 @@ static struct aead_engine_alg aead_algs[] = {
.base.setauthsize = starfive_aes_gcm_setauthsize,
.base.encrypt = starfive_aes_gcm_encrypt,
.base.decrypt = starfive_aes_gcm_decrypt,
- .base.init = starfive_aes_aead_init_tfm,
+ .base.init = starfive_aes_gcm_init_tfm,
.base.exit = starfive_aes_aead_exit_tfm,
.base.ivsize = GCM_AES_IV_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
@@ -900,7 +1096,8 @@ static struct aead_engine_alg aead_algs[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "starfive-gcm-aes",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
.cra_alignmask = 0xf,
@@ -914,7 +1111,7 @@ static struct aead_engine_alg aead_algs[] = {
.base.setauthsize = starfive_aes_ccm_setauthsize,
.base.encrypt = starfive_aes_ccm_encrypt,
.base.decrypt = starfive_aes_ccm_decrypt,
- .base.init = starfive_aes_aead_init_tfm,
+ .base.init = starfive_aes_ccm_init_tfm,
.base.exit = starfive_aes_aead_exit_tfm,
.base.ivsize = AES_BLOCK_SIZE,
.base.maxauthsize = AES_BLOCK_SIZE,
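The AES rework above drops the IRQ/tasklet FIFO loop in favor of synchronous dmaengine slave transfers completed on the crypto-engine thread. A minimal sketch of the dmaengine idiom the driver now uses (channel and callback names hypothetical; 1s timeout as in the driver):

	struct dma_async_tx_descriptor *desc;
	DECLARE_COMPLETION_ONSTACK(done);

	desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = xfer_done_cb;	/* hypothetical: calls complete(&done) */
	desc->callback_param = &done;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;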
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index 425fddf3a8ab..e4dfed7ee0b0 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -89,34 +89,10 @@ static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp)
dma_release_channel(cryp->rx);
}
-static irqreturn_t starfive_cryp_irq(int irq, void *priv)
-{
- u32 status;
- u32 mask;
- struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv;
-
- mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET);
- if (status & STARFIVE_IE_FLAG_AES_DONE) {
- mask |= STARFIVE_IE_MASK_AES_DONE;
- writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
- tasklet_schedule(&cryp->aes_done);
- }
-
- if (status & STARFIVE_IE_FLAG_HASH_DONE) {
- mask |= STARFIVE_IE_MASK_HASH_DONE;
- writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
- tasklet_schedule(&cryp->hash_done);
- }
-
- return IRQ_HANDLED;
-}
-
static int starfive_cryp_probe(struct platform_device *pdev)
{
struct starfive_cryp_dev *cryp;
struct resource *res;
- int irq;
int ret;
cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
@@ -131,9 +107,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base),
"Error remapping memory for platform device\n");
- tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp);
- tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp);
-
cryp->phys_base = res->start;
cryp->dma_maxburst = 32;
cryp->side_chan = side_chan;
@@ -153,16 +126,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst),
"Error getting hardware reset line\n");
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name,
- (void *)cryp);
- if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "Failed to register interrupt handler\n");
-
clk_prepare_enable(cryp->hclk);
clk_prepare_enable(cryp->ahb);
reset_control_deassert(cryp->rst);
@@ -219,9 +182,6 @@ err_dma_init:
clk_disable_unprepare(cryp->ahb);
reset_control_assert(cryp->rst);
- tasklet_kill(&cryp->aes_done);
- tasklet_kill(&cryp->hash_done);
-
return ret;
}
@@ -233,9 +193,6 @@ static void starfive_cryp_remove(struct platform_device *pdev)
starfive_hash_unregister_algs();
starfive_rsa_unregister_algs();
- tasklet_kill(&cryp->aes_done);
- tasklet_kill(&cryp->hash_done);
-
crypto_engine_stop(cryp->engine);
crypto_engine_exit(cryp->engine);
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
index 6cdf6db5d904..494a74f52706 100644
--- a/drivers/crypto/starfive/jh7110-cryp.h
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -91,6 +91,7 @@ union starfive_hash_csr {
#define STARFIVE_HASH_KEY_DONE BIT(13)
u32 key_done :1;
u32 key_flag :1;
+#define STARFIVE_HASH_HMAC_DONE BIT(15)
u32 hmac_done :1;
#define STARFIVE_HASH_BUSY BIT(16)
u32 busy :1;
@@ -168,6 +169,7 @@ struct starfive_cryp_ctx {
struct crypto_akcipher *akcipher_fbk;
struct crypto_ahash *ahash_fbk;
struct crypto_aead *aead_fbk;
+ struct crypto_skcipher *skcipher_fbk;
};
struct starfive_cryp_dev {
@@ -185,11 +187,8 @@ struct starfive_cryp_dev {
struct dma_chan *rx;
struct dma_slave_config cfg_in;
struct dma_slave_config cfg_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
struct crypto_engine *engine;
- struct tasklet_struct aes_done;
- struct tasklet_struct hash_done;
+ struct completion dma_done;
size_t assoclen;
size_t total_in;
size_t total_out;
@@ -236,7 +235,4 @@ void starfive_rsa_unregister_algs(void);
int starfive_aes_register_algs(void);
void starfive_aes_unregister_algs(void);
-
-void starfive_hash_done_task(unsigned long param);
-void starfive_aes_done_task(unsigned long param);
#endif
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index b6d1808012ca..2c60a1047bc3 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -36,15 +36,22 @@
#define STARFIVE_HASH_BUFLEN SHA512_BLOCK_SIZE
#define STARFIVE_HASH_RESET 0x2
-static inline int starfive_hash_wait_busy(struct starfive_cryp_ctx *ctx)
+static inline int starfive_hash_wait_busy(struct starfive_cryp_dev *cryp)
{
- struct starfive_cryp_dev *cryp = ctx->cryp;
u32 status;
return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
!(status & STARFIVE_HASH_BUSY), 10, 100000);
}
+static inline int starfive_hash_wait_hmac_done(struct starfive_cryp_dev *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status,
+ (status & STARFIVE_HASH_HMAC_DONE), 10, 100000);
+}
+
static inline int starfive_hash_wait_key_done(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
@@ -84,64 +91,26 @@ static int starfive_hash_hmac_key(struct starfive_cryp_ctx *ctx)
return 0;
}
-static void starfive_hash_start(void *param)
+static void starfive_hash_start(struct starfive_cryp_dev *cryp)
{
- struct starfive_cryp_ctx *ctx = param;
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- struct starfive_cryp_dev *cryp = ctx->cryp;
- union starfive_alg_cr alg_cr;
union starfive_hash_csr csr;
- u32 stat;
-
- dma_unmap_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
-
- alg_cr.v = 0;
- alg_cr.clear = 1;
-
- writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
csr.v = readl(cryp->base + STARFIVE_HASH_SHACSR);
csr.firstb = 0;
csr.final = 1;
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_HASH_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR);
}
-static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
+static void starfive_hash_dma_callback(void *param)
{
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- struct starfive_cryp_dev *cryp = ctx->cryp;
- struct dma_async_tx_descriptor *in_desc;
- union starfive_alg_cr alg_cr;
- int total_len;
- int ret;
-
- if (!rctx->total) {
- starfive_hash_start(ctx);
- return 0;
- }
+ struct starfive_cryp_dev *cryp = param;
- writel(rctx->total, cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
-
- total_len = rctx->total;
- total_len = (total_len & 0x3) ? (((total_len >> 2) + 1) << 2) : total_len;
- sg_dma_len(rctx->in_sg) = total_len;
-
- alg_cr.v = 0;
- alg_cr.start = 1;
- alg_cr.hash_dma_en = 1;
-
- writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
-
- ret = dma_map_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE);
- if (!ret)
- return dev_err_probe(cryp->dev, -EINVAL, "dma_map_sg() error\n");
+ complete(&cryp->dma_done);
+}
- cryp->cfg_in.direction = DMA_MEM_TO_DEV;
- cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+static void starfive_hash_dma_init(struct starfive_cryp_dev *cryp)
+{
+ cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cryp->cfg_in.src_maxburst = cryp->dma_maxburst;
cryp->cfg_in.dst_maxburst = cryp->dma_maxburst;
@@ -149,50 +118,48 @@ static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx)
dmaengine_slave_config(cryp->tx, &cryp->cfg_in);
- in_desc = dmaengine_prep_slave_sg(cryp->tx, rctx->in_sg,
- ret, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
- if (!in_desc)
- return -EINVAL;
-
- in_desc->callback = starfive_hash_start;
- in_desc->callback_param = ctx;
-
- dmaengine_submit(in_desc);
- dma_async_issue_pending(cryp->tx);
-
- return 0;
+ init_completion(&cryp->dma_done);
}
-static int starfive_hash_xmit(struct starfive_cryp_ctx *ctx)
+static int starfive_hash_dma_xfer(struct starfive_cryp_dev *cryp,
+ struct scatterlist *sg)
{
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
- struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct dma_async_tx_descriptor *in_desc;
+ union starfive_alg_cr alg_cr;
int ret = 0;
- rctx->csr.hash.v = 0;
- rctx->csr.hash.reset = 1;
- writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
-
- if (starfive_hash_wait_busy(ctx))
- return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting engine.\n");
+ alg_cr.v = 0;
+ alg_cr.start = 1;
+ alg_cr.hash_dma_en = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
- rctx->csr.hash.v = 0;
- rctx->csr.hash.mode = ctx->hash_mode;
- rctx->csr.hash.ie = 1;
+ writel(sg_dma_len(sg), cryp->base + STARFIVE_DMA_IN_LEN_OFFSET);
+ sg_dma_len(sg) = ALIGN(sg_dma_len(sg), sizeof(u32));
- if (ctx->is_hmac) {
- ret = starfive_hash_hmac_key(ctx);
- if (ret)
- return ret;
- } else {
- rctx->csr.hash.start = 1;
- rctx->csr.hash.firstb = 1;
- writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+ in_desc = dmaengine_prep_slave_sg(cryp->tx, sg, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc) {
+ ret = -EINVAL;
+ goto end;
}
- return starfive_hash_xmit_dma(ctx);
+ reinit_completion(&cryp->dma_done);
+ in_desc->callback = starfive_hash_dma_callback;
+ in_desc->callback_param = cryp;
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(cryp->tx);
+
+ if (!wait_for_completion_timeout(&cryp->dma_done,
+ msecs_to_jiffies(1000)))
+ ret = -ETIMEDOUT;
+
+end:
+ alg_cr.v = 0;
+ alg_cr.clear = 1;
+ writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET);
+
+ return ret;
}
static int starfive_hash_copy_hash(struct ahash_request *req)
@@ -215,58 +182,74 @@ static int starfive_hash_copy_hash(struct ahash_request *req)
return 0;
}
-void starfive_hash_done_task(unsigned long param)
+static void starfive_hash_done_task(struct starfive_cryp_dev *cryp)
{
- struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
int err = cryp->err;
if (!err)
err = starfive_hash_copy_hash(cryp->req.hreq);
- /* Reset to clear hash_done in irq register*/
- writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
-
crypto_finalize_hash_request(cryp->engine, cryp->req.hreq, err);
}
-static int starfive_hash_check_aligned(struct scatterlist *sg, size_t total, size_t align)
+static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
{
- int len = 0;
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct scatterlist *tsg;
+ int ret, src_nents, i;
- if (!total)
- return 0;
+ writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR);
- if (!IS_ALIGNED(total, align))
- return -EINVAL;
+ if (starfive_hash_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting hardware\n");
- while (sg) {
- if (!IS_ALIGNED(sg->offset, sizeof(u32)))
- return -EINVAL;
+ rctx->csr.hash.v = 0;
+ rctx->csr.hash.mode = ctx->hash_mode;
- if (!IS_ALIGNED(sg->length, align))
- return -EINVAL;
+ if (ctx->is_hmac) {
+ ret = starfive_hash_hmac_key(ctx);
+ if (ret)
+ return ret;
+ } else {
+ rctx->csr.hash.start = 1;
+ rctx->csr.hash.firstb = 1;
+ writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR);
+ }
+
+ /* No input message: just produce the digest and finish. */
+ if (!rctx->total)
+ goto hash_start;
+
+ starfive_hash_dma_init(cryp);
+
+ for_each_sg(rctx->in_sg, tsg, rctx->in_sg_len, i) {
+ src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
+ if (src_nents == 0)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "dma_map_sg error\n");
- len += sg->length;
- sg = sg_next(sg);
+ ret = starfive_hash_dma_xfer(cryp, tsg);
+ dma_unmap_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
}
- if (len != total)
- return -EINVAL;
+hash_start:
+ starfive_hash_start(cryp);
- return 0;
-}
+ if (starfive_hash_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error generating digest\n");
-static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
-{
- struct ahash_request *req = container_of(areq, struct ahash_request,
- base);
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct starfive_cryp_dev *cryp = ctx->cryp;
+ if (ctx->is_hmac)
+ cryp->err = starfive_hash_wait_hmac_done(cryp);
- if (!cryp)
- return -ENODEV;
+ starfive_hash_done_task(cryp);
- return starfive_hash_xmit(ctx);
+ return 0;
}
static int starfive_hash_init(struct ahash_request *req)
@@ -337,22 +320,6 @@ static int starfive_hash_finup(struct ahash_request *req)
return crypto_ahash_finup(&rctx->ahash_fbk_req);
}
-static int starfive_hash_digest_fb(struct ahash_request *req)
-{
- struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk);
- ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags,
- req->base.complete, req->base.data);
-
- ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src,
- req->result, req->nbytes);
-
- return crypto_ahash_digest(&rctx->ahash_fbk_req);
-}
-
static int starfive_hash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -370,9 +337,6 @@ static int starfive_hash_digest(struct ahash_request *req)
rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
ctx->rctx = rctx;
- if (starfive_hash_check_aligned(rctx->in_sg, rctx->total, rctx->blksize))
- return starfive_hash_digest_fb(req);
-
return crypto_transfer_hash_request_to_engine(cryp->engine, req);
}
@@ -406,7 +370,8 @@ static int starfive_hash_import(struct ahash_request *req, const void *in)
static int starfive_hash_init_tfm(struct crypto_ahash *hash,
const char *alg_name,
- unsigned int mode)
+ unsigned int mode,
+ bool is_hmac)
{
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
@@ -426,7 +391,7 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash,
crypto_ahash_set_reqsize(hash, sizeof(struct starfive_cryp_request_ctx) +
crypto_ahash_reqsize(ctx->ahash_fbk));
- ctx->keylen = 0;
+ ctx->is_hmac = is_hmac;
ctx->hash_mode = mode;
return 0;
@@ -529,81 +494,61 @@ static int starfive_hash_setkey(struct crypto_ahash *hash,
static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha224-generic",
- STARFIVE_HASH_SHA224);
+ STARFIVE_HASH_SHA224, 0);
}
static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha256-generic",
- STARFIVE_HASH_SHA256);
+ STARFIVE_HASH_SHA256, 0);
}
static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha384-generic",
- STARFIVE_HASH_SHA384);
+ STARFIVE_HASH_SHA384, 0);
}
static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sha512-generic",
- STARFIVE_HASH_SHA512);
+ STARFIVE_HASH_SHA512, 0);
}
static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
{
return starfive_hash_init_tfm(hash, "sm3-generic",
- STARFIVE_HASH_SM3);
+ STARFIVE_HASH_SM3, 0);
}
static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
- STARFIVE_HASH_SHA224);
+ STARFIVE_HASH_SHA224, 1);
}
static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
- STARFIVE_HASH_SHA256);
+ STARFIVE_HASH_SHA256, 1);
}
static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
- STARFIVE_HASH_SHA384);
+ STARFIVE_HASH_SHA384, 1);
}
static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
- STARFIVE_HASH_SHA512);
+ STARFIVE_HASH_SHA512, 1);
}
static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash)
{
- struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
-
- ctx->is_hmac = true;
-
return starfive_hash_init_tfm(hash, "hmac(sm3-generic)",
- STARFIVE_HASH_SM3);
+ STARFIVE_HASH_SM3, 1);
}
static struct ahash_engine_alg algs_sha2_sm3[] = {
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index cf8bda7f0855..33093ba4b13a 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -45,6 +45,9 @@ static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
static void starfive_rsa_free_key(struct starfive_rsa_key *key)
{
+ if (!key->key_sz)
+ return;
+
kfree_sensitive(key->d);
kfree_sensitive(key->e);
kfree_sensitive(key->n);
@@ -273,7 +276,6 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
err_rsa_crypt:
writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- kfree(rctx->rsa_data);
return ret;
}
@@ -534,16 +536,14 @@ static int starfive_rsa_init_tfm(struct crypto_akcipher *tfm)
{
struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+ if (!ctx->cryp)
+ return -ENODEV;
+
ctx->akcipher_fbk = crypto_alloc_akcipher("rsa-generic", 0, 0);
if (IS_ERR(ctx->akcipher_fbk))
return PTR_ERR(ctx->akcipher_fbk);
- ctx->cryp = starfive_cryp_find_dev(ctx);
- if (!ctx->cryp) {
- crypto_free_akcipher(ctx->akcipher_fbk);
- return -ENODEV;
- }
-
akcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
sizeof(struct crypto_akcipher) + 32);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 34e0d7e381a8..351827372ea6 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -94,6 +94,7 @@
#define HASH_FLAGS_ERRORS BIT(21)
#define HASH_FLAGS_EMPTY BIT(22)
#define HASH_FLAGS_HMAC BIT(23)
+#define HASH_FLAGS_SGS_COPIED BIT(24)
#define HASH_OP_UPDATE 1
#define HASH_OP_FINAL 2
@@ -145,7 +146,7 @@ struct stm32_hash_state {
u16 bufcnt;
u16 blocklen;
- u8 buffer[HASH_BUFLEN] __aligned(4);
+ u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
/* hash state */
u32 hw_context[3 + HASH_CSR_NB_MAX];
@@ -158,8 +159,8 @@ struct stm32_hash_request_ctx {
u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
size_t digcnt;
- /* DMA */
struct scatterlist *sg;
+ struct scatterlist sgl[2]; /* scatterlist used to realize alignment */
unsigned int offset;
unsigned int total;
struct scatterlist sg_key;
@@ -184,6 +185,7 @@ struct stm32_hash_pdata {
size_t algs_info_size;
bool has_sr;
bool has_mdmat;
+ bool context_secured;
bool broken_emptymsg;
bool ux500;
};
@@ -195,6 +197,7 @@ struct stm32_hash_dev {
struct reset_control *rst;
void __iomem *io_base;
phys_addr_t phys_base;
+ u8 xmit_buf[HASH_BUFLEN] __aligned(sizeof(u32));
u32 dma_mode;
bool polled;
@@ -220,6 +223,8 @@ static struct stm32_hash_drv stm32_hash = {
};
static void stm32_hash_dma_callback(void *param);
+static int stm32_hash_prepare_request(struct ahash_request *req);
+static void stm32_hash_unprepare_request(struct ahash_request *req);
static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
@@ -232,6 +237,11 @@ static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
writel_relaxed(value, hdev->io_base + offset);
}
+/**
+ * stm32_hash_wait_busy - wait until the hash processor is available. Returns
+ * an error if the hash core has been processing a block of data for more
+ * than 10 ms.
+ * @hdev: the stm32_hash_dev device.
+ */
static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
u32 status;
@@ -245,6 +255,11 @@ static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
!(status & HASH_SR_BUSY), 10, 10000);
}
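
The 10 ms figure in the kernel-doc above falls out of the poll parameters: readl_relaxed_poll_timeout(..., 10, 10000) samples the status register every 10 us and gives up after 10000 us. A hand-rolled equivalent, shown only to make the timing explicit and reusing this driver's helpers:

static int example_wait_not_busy(struct stm32_hash_dev *hdev)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	while (stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY) {
		if (ktime_after(ktime_get(), deadline))
			return -ETIMEDOUT;	/* busy for more than 10 ms */
		usleep_range(10, 20);
	}

	return 0;
}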
+/**
+ * stm32_hash_set_nblw - set the number of valid bytes in the last word.
+ * @hdev: the stm32_hash_dev device.
+ * @length: the length of the final word.
+ */
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
u32 reg;
@@ -282,6 +297,11 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
return 0;
}
+/**
+ * stm32_hash_write_ctrl - Initialize the hash processor only if
+ * HASH_FLAGS_INIT is not set yet.
+ * @hdev: the stm32_hash_dev device
+ */
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
@@ -469,9 +489,7 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct stm32_hash_state *state = &rctx->state;
- u32 *preg = state->hw_context;
int bufcnt, err = 0, final;
- int i, swap_reg;
dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
@@ -495,34 +513,23 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
}
- if (!(hdev->flags & HASH_FLAGS_INIT))
- return 0;
-
- if (stm32_hash_wait_busy(hdev))
- return -ETIMEDOUT;
-
- swap_reg = hash_swap_reg(rctx);
-
- if (!hdev->pdata->ux500)
- *preg++ = stm32_hash_read(hdev, HASH_IMR);
- *preg++ = stm32_hash_read(hdev, HASH_STR);
- *preg++ = stm32_hash_read(hdev, HASH_CR);
- for (i = 0; i < swap_reg; i++)
- *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
-
- state->flags |= HASH_FLAGS_INIT;
-
return err;
}
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
- struct scatterlist *sg, int length, int mdma)
+ struct scatterlist *sg, int length, int mdmat)
{
struct dma_async_tx_descriptor *in_desc;
dma_cookie_t cookie;
u32 reg;
int err;
+ dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length);
+
+ /* Do not use DMA if there is no data to send */
+ if (length <= 0)
+ return 0;
+
in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
@@ -535,13 +542,12 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
in_desc->callback = stm32_hash_dma_callback;
in_desc->callback_param = hdev;
- hdev->flags |= HASH_FLAGS_FINAL;
hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
reg = stm32_hash_read(hdev, HASH_CR);
if (hdev->pdata->has_mdmat) {
- if (mdma)
+ if (mdmat)
reg |= HASH_CR_MDMAT;
else
reg &= ~HASH_CR_MDMAT;
@@ -550,7 +556,6 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
stm32_hash_write(hdev, HASH_CR, reg);
- stm32_hash_set_nblw(hdev, length);
cookie = dmaengine_submit(in_desc);
err = dma_submit_error(cookie);
@@ -590,7 +595,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
int err;
- if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) {
+ if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) {
err = stm32_hash_write_key(hdev);
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
@@ -655,18 +660,20 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
struct scatterlist sg[1], *tsg;
int err = 0, reg, ncp = 0;
unsigned int i, len = 0, bufcnt = 0;
+ bool final = hdev->flags & HASH_FLAGS_FINAL;
bool is_last = false;
+ u32 last_word;
- rctx->sg = hdev->req->src;
- rctx->total = hdev->req->nbytes;
+ dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n",
+ __func__, rctx->total, rctx->state.bufcnt, final);
- rctx->nents = sg_nents(rctx->sg);
if (rctx->nents < 0)
return -EINVAL;
stm32_hash_write_ctrl(hdev);
- if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
+ hdev->flags |= HASH_FLAGS_HMAC_KEY;
err = stm32_hash_hmac_dma_send(hdev);
if (err != -EINPROGRESS)
return err;
@@ -677,22 +684,36 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
len = sg->length;
if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
- sg->length = rctx->total - bufcnt;
- is_last = true;
- if (hdev->dma_mode == 1) {
- len = (ALIGN(sg->length, 16) - 16);
-
- ncp = sg_pcopy_to_buffer(
- rctx->sg, rctx->nents,
- rctx->state.buffer, sg->length - len,
- rctx->total - sg->length + len);
-
- sg->length = len;
+ if (!final) {
+ /* Always manually put the last word of a non-final transfer. */
+ len -= sizeof(u32);
+ sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len);
+ sg->length -= sizeof(u32);
} else {
- if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
- len = sg->length;
- sg->length = ALIGN(sg->length,
- sizeof(u32));
+ /*
+ * In Multiple DMA mode, DMA must be aborted before the final
+ * transfer.
+ */
+ sg->length = rctx->total - bufcnt;
+ if (hdev->dma_mode > 0) {
+ len = (ALIGN(sg->length, 16) - 16);
+
+ ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents,
+ rctx->state.buffer,
+ sg->length - len,
+ rctx->total - sg->length + len);
+
+ if (!len)
+ break;
+
+ sg->length = len;
+ } else {
+ is_last = true;
+ if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
+ len = sg->length;
+ sg->length = ALIGN(sg->length,
+ sizeof(u32));
+ }
}
}
}
@@ -706,43 +727,67 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
+ /* The last word of a non-final transfer is sent manually. */
+ if (!final) {
+ stm32_hash_write(hdev, HASH_DIN, last_word);
+ len += sizeof(u32);
+ }
+
+ rctx->total -= len;
+
bufcnt += sg[0].length;
dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
- if (err == -ENOMEM)
+ if (err == -ENOMEM || err == -ETIMEDOUT)
return err;
if (is_last)
break;
}
- if (hdev->dma_mode == 1) {
- if (stm32_hash_wait_busy(hdev))
- return -ETIMEDOUT;
- reg = stm32_hash_read(hdev, HASH_CR);
- reg &= ~HASH_CR_DMAE;
- reg |= HASH_CR_DMAA;
- stm32_hash_write(hdev, HASH_CR, reg);
+ /*
+ * When the second-to-last block transfer of 4 words is performed by the DMA,
+ * the software must set the DMA Abort bit (DMAA) to 1 before completing the
+ * last transfer of 4 words or less.
+ */
+ if (final) {
+ if (hdev->dma_mode > 0) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ reg = stm32_hash_read(hdev, HASH_CR);
+ reg &= ~HASH_CR_DMAE;
+ reg |= HASH_CR_DMAA;
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ if (ncp) {
+ memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32)));
+ writesl(hdev->io_base + HASH_DIN, buffer,
+ DIV_ROUND_UP(ncp, sizeof(u32)));
+ }
- if (ncp) {
- memset(buffer + ncp, 0,
- DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
- writesl(hdev->io_base + HASH_DIN, buffer,
- DIV_ROUND_UP(ncp, sizeof(u32)));
+ stm32_hash_set_nblw(hdev, ncp);
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+ err = -EINPROGRESS;
}
- stm32_hash_set_nblw(hdev, ncp);
- reg = stm32_hash_read(hdev, HASH_STR);
- reg |= HASH_STR_DCAL;
- stm32_hash_write(hdev, HASH_STR, reg);
- err = -EINPROGRESS;
- }
- if (hdev->flags & HASH_FLAGS_HMAC) {
- if (stm32_hash_wait_busy(hdev))
- return -ETIMEDOUT;
- err = stm32_hash_hmac_dma_send(hdev);
+ /*
+ * The hash processor needs the key to be loaded a second time in order
+ * to process the HMAC.
+ */
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ err = stm32_hash_hmac_dma_send(hdev);
+ }
+
+ return err;
}
- return err;
+ if (err != -EINPROGRESS)
+ return err;
+
+ return 0;
}
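
For reference, the DMAA hand-off described in the comment above boils down to a short register sequence. A condensed sketch, assuming the register helpers and bit definitions already used in this file:

static int example_mdma_abort(struct stm32_hash_dev *hdev)
{
	u32 reg;

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	reg = stm32_hash_read(hdev, HASH_CR);
	reg &= ~HASH_CR_DMAE;	/* stop feeding the FIFO through DMA */
	reg |= HASH_CR_DMAA;	/* announce the last transfer of 4 words or less */
	stm32_hash_write(hdev, HASH_CR, reg);

	return 0;
}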
static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
@@ -765,33 +810,6 @@ static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
return hdev;
}
-static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
-{
- struct scatterlist *sg;
- struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- int i;
-
- if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen)
- return false;
-
- if (sg_nents(req->src) > 1) {
- if (hdev->dma_mode == 1)
- return false;
- for_each_sg(req->src, sg, sg_nents(req->src), i) {
- if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
- (!sg_is_last(sg)))
- return false;
- }
- }
-
- if (req->src->offset % 4)
- return false;
-
- return true;
-}
-
static int stm32_hash_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -802,8 +820,10 @@ static int stm32_hash_init(struct ahash_request *req)
bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;
rctx->hdev = hdev;
+ state->flags = 0;
- state->flags = HASH_FLAGS_CPU;
+ if (!(hdev->dma_lch && hdev->pdata->has_mdmat))
+ state->flags |= HASH_FLAGS_CPU;
if (sha3_mode)
state->flags |= HASH_FLAGS_SHA3_MODE;
@@ -857,6 +877,7 @@ static int stm32_hash_init(struct ahash_request *req)
dev_err(hdev->dev, "Error, block too large");
return -EINVAL;
}
+ rctx->nents = 0;
rctx->total = 0;
rctx->offset = 0;
rctx->data_type = HASH_DATA_8_BITS;
@@ -874,6 +895,9 @@ static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct stm32_hash_state *state = &rctx->state;
+ dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0",
+ rctx->total, rctx->digcnt);
+
if (!(state->flags & HASH_FLAGS_CPU))
return stm32_hash_dma_send(hdev);
@@ -887,6 +911,11 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
struct stm32_hash_state *state = &rctx->state;
int buflen = state->bufcnt;
+ if (!(state->flags & HASH_FLAGS_CPU)) {
+ hdev->flags |= HASH_FLAGS_FINAL;
+ return stm32_hash_dma_send(hdev);
+ }
+
if (state->flags & HASH_FLAGS_FINUP)
return stm32_hash_update_req(hdev);
@@ -968,15 +997,21 @@ static int stm32_hash_finish(struct ahash_request *req)
static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
struct stm32_hash_dev *hdev = rctx->hdev;
+ if (hdev->flags & HASH_FLAGS_DMA_ACTIVE)
+ state->flags |= HASH_FLAGS_DMA_ACTIVE;
+ else
+ state->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+
if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
stm32_hash_copy_hash(req);
err = stm32_hash_finish(req);
}
- pm_runtime_mark_last_busy(hdev->dev);
- pm_runtime_put_autosuspend(hdev->dev);
+ /* Finalized requests must be unprepared here */
+ stm32_hash_unprepare_request(req);
crypto_finalize_hash_request(hdev->engine, req, err);
}
@@ -1006,6 +1041,10 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
pm_runtime_get_sync(hdev->dev);
+ err = stm32_hash_prepare_request(req);
+ if (err)
+ return err;
+
hdev->req = req;
hdev->flags = 0;
swap_reg = hash_swap_reg(rctx);
@@ -1030,6 +1069,12 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
if (state->flags & HASH_FLAGS_HMAC)
hdev->flags |= HASH_FLAGS_HMAC |
HASH_FLAGS_HMAC_KEY;
+
+ if (state->flags & HASH_FLAGS_CPU)
+ hdev->flags |= HASH_FLAGS_CPU;
+
+ if (state->flags & HASH_FLAGS_DMA_ACTIVE)
+ hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
}
if (rctx->op == HASH_OP_UPDATE)
@@ -1054,6 +1099,284 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
return 0;
}
+static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
+ struct scatterlist *sg, int bs,
+ unsigned int new_len)
+{
+ struct stm32_hash_state *state = &rctx->state;
+ int pages;
+ void *buf;
+
+ pages = get_order(new_len);
+
+ buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+ if (!buf) {
+ pr_err("Couldn't allocate pages for unaligned cases.\n");
+ return -ENOMEM;
+ }
+
+ if (state->bufcnt)
+ memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
+
+ scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
+ min(new_len, rctx->total) - state->bufcnt, 0);
+ sg_init_table(rctx->sgl, 1);
+ sg_set_buf(rctx->sgl, buf, new_len);
+ rctx->sg = rctx->sgl;
+ state->flags |= HASH_FLAGS_SGS_COPIED;
+ rctx->nents = 1;
+ rctx->offset += new_len - state->bufcnt;
+ state->bufcnt = 0;
+ rctx->total = new_len;
+
+ return 0;
+}
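
stm32_hash_copy_sgs() linearizes unaligned scatterlists into freshly allocated pages; get_order() rounds the request up to a power-of-two number of pages, and the matching free_pages() happens later in stm32_hash_unprepare_request(). A minimal sketch of this bounce-buffer idiom, with the length handling assumed:

static void *example_bounce_alloc(size_t len, int *order)
{
	*order = get_order(len);	/* round up to 2^n pages */
	return (void *)__get_free_pages(GFP_ATOMIC, *order);
}

/* Teardown mirrors the allocation: free_pages((unsigned long)buf, order); */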
+
+static int stm32_hash_align_sgs(struct scatterlist *sg,
+ int nbytes, int bs, bool init, bool final,
+ struct stm32_hash_request_ctx *rctx)
+{
+ struct stm32_hash_state *state = &rctx->state;
+ struct stm32_hash_dev *hdev = rctx->hdev;
+ struct scatterlist *sg_tmp = sg;
+ int offset = rctx->offset;
+ int new_len;
+ int n = 0;
+ int bufcnt = state->bufcnt;
+ bool secure_ctx = hdev->pdata->context_secured;
+ bool aligned = true;
+
+ if (!sg || !sg->length || !nbytes) {
+ if (bufcnt) {
+ bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
+ sg_init_table(rctx->sgl, 1);
+ sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt);
+ rctx->sg = rctx->sgl;
+ rctx->nents = 1;
+ }
+
+ return 0;
+ }
+
+ new_len = nbytes;
+
+ if (offset)
+ aligned = false;
+
+ if (final) {
+ new_len = DIV_ROUND_UP(new_len, bs) * bs;
+ } else {
+ new_len = (new_len - 1) / bs * bs; /* send a multiple of bs, keeping at least one byte back */
+
+ /*
+ * On some versions of the HASH IP, the context can only be saved when the
+ * FIFO is ready to accept a new block. This implies sending n blocks plus
+ * one 32-bit word in the first DMA transfer.
+ */
+ if (init && secure_ctx) {
+ new_len += sizeof(u32);
+ if (unlikely(new_len > nbytes))
+ new_len -= bs;
+ }
+ }
+
+ if (!new_len)
+ return 0;
+
+ if (nbytes != new_len)
+ aligned = false;
+
+ while (nbytes > 0 && sg_tmp) {
+ n++;
+
+ if (bufcnt) {
+ if (!IS_ALIGNED(bufcnt, bs)) {
+ aligned = false;
+ break;
+ }
+ nbytes -= bufcnt;
+ bufcnt = 0;
+ if (!nbytes)
+ aligned = false;
+
+ continue;
+ }
+
+ if (offset < sg_tmp->length) {
+ if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
+ aligned = false;
+ break;
+ }
+
+ if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
+ aligned = false;
+ break;
+ }
+ }
+
+ if (offset) {
+ offset -= sg_tmp->length;
+ if (offset < 0) {
+ nbytes += offset;
+ offset = 0;
+ }
+ } else {
+ nbytes -= sg_tmp->length;
+ }
+
+ sg_tmp = sg_next(sg_tmp);
+
+ if (nbytes < 0) {
+ aligned = false;
+ break;
+ }
+ }
+
+ if (!aligned)
+ return stm32_hash_copy_sgs(rctx, sg, bs, new_len);
+
+ rctx->total = new_len;
+ rctx->offset += new_len;
+ rctx->nents = n;
+ if (state->bufcnt) {
+ sg_init_table(rctx->sgl, 2);
+ sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt);
+ sg_chain(rctx->sgl, 2, sg);
+ rctx->sg = rctx->sgl;
+ } else {
+ rctx->sg = sg;
+ }
+
+ return 0;
+}
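
The walk above enforces the two per-entry DMA constraints of the HASH IP: a 32-bit aligned start address and a whole number of blocks per entry (the copy path handles everything else). Expressed as a standalone predicate, purely for illustration:

static bool example_entry_dma_ok(struct scatterlist *sg, int bs)
{
	return IS_ALIGNED(sg->offset, sizeof(u32)) &&	/* word-aligned start */
	       IS_ALIGNED(sg->length, bs);		/* whole blocks only */
}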
+
+static int stm32_hash_prepare_request(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_state *state = &rctx->state;
+ unsigned int nbytes;
+ int ret, hash_later, bs;
+ bool update = rctx->op & HASH_OP_UPDATE;
+ bool init = !(state->flags & HASH_FLAGS_INIT);
+ bool finup = state->flags & HASH_FLAGS_FINUP;
+ bool final = state->flags & HASH_FLAGS_FINAL;
+
+ if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU)
+ return 0;
+
+ bs = crypto_ahash_blocksize(tfm);
+
+ nbytes = state->bufcnt;
+
+ /*
+ * For an update request, nbytes is the buffered byte count plus the new
+ * request bytes, minus the part of the request already consumed into the
+ * buffer (rctx->offset).
+ */
+ if (update || finup)
+ nbytes += req->nbytes - rctx->offset;
+
+ dev_dbg(hdev->dev,
+ "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+ __func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt);
+
+ if (!nbytes)
+ return 0;
+
+ rctx->total = nbytes;
+
+ if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) {
+ int len = bs - state->bufcnt % bs;
+
+ if (len > req->nbytes)
+ len = req->nbytes;
+ scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
+ 0, len, 0);
+ state->bufcnt += len;
+ rctx->offset = len;
+ }
+
+ /* Copy the buffer into a temporary one used for sg alignment */
+ if (state->bufcnt)
+ memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
+
+ ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
+ if (ret)
+ return ret;
+
+ hash_later = nbytes - rctx->total;
+ if (hash_later < 0)
+ hash_later = 0;
+
+ if (hash_later && hash_later <= state->blocklen) {
+ scatterwalk_map_and_copy(state->buffer,
+ req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
+
+ state->bufcnt = hash_later;
+ } else {
+ state->bufcnt = 0;
+ }
+
+ if (hash_later > state->blocklen) {
+ /* FIXME: add support for this case */
+ pr_err("Buffer contains more than one block.\n");
+ return -ENOMEM;
+ }
+
+ rctx->total = min(nbytes, rctx->total);
+
+ return 0;
+}
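
A worked example of the bookkeeping above, with assumed numbers: for SHA-256 (bs = 64), 10 bytes already buffered and a 150-byte update, nbytes = 10 + 150 = 160. stm32_hash_align_sgs() keeps (160 - 1) / 64 * 64 = 128 bytes for the DMA transfer, so hash_later = 160 - 128 = 32 bytes are copied back into state->buffer for the next request.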
+
+static void stm32_hash_unprepare_request(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_state *state = &rctx->state;
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ u32 *preg = state->hw_context;
+ int swap_reg, i;
+
+ if (hdev->dma_lch)
+ dmaengine_terminate_sync(hdev->dma_lch);
+
+ if (state->flags & HASH_FLAGS_SGS_COPIED)
+ free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length));
+
+ rctx->sg = NULL;
+ rctx->offset = 0;
+
+ state->flags &= ~(HASH_FLAGS_SGS_COPIED);
+
+ if (!(hdev->flags & HASH_FLAGS_INIT))
+ goto pm_runtime;
+
+ state->flags |= HASH_FLAGS_INIT;
+
+ if (stm32_hash_wait_busy(hdev)) {
+ dev_warn(hdev->dev, "Wait busy failed.\n");
+ return;
+ }
+
+ swap_reg = hash_swap_reg(rctx);
+
+ if (!hdev->pdata->ux500)
+ *preg++ = stm32_hash_read(hdev, HASH_IMR);
+ *preg++ = stm32_hash_read(hdev, HASH_STR);
+ *preg++ = stm32_hash_read(hdev, HASH_CR);
+ for (i = 0; i < swap_reg; i++)
+ *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+pm_runtime:
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+}
+
static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
@@ -1070,16 +1393,26 @@ static int stm32_hash_update(struct ahash_request *req)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_state *state = &rctx->state;
- if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU))
+ if (!req->nbytes)
return 0;
- rctx->total = req->nbytes;
- rctx->sg = req->src;
- rctx->offset = 0;
- if ((state->bufcnt + rctx->total < state->blocklen)) {
- stm32_hash_append_sg(rctx);
- return 0;
+ if (state->flags & HASH_FLAGS_CPU) {
+ rctx->total = req->nbytes;
+ rctx->sg = req->src;
+ rctx->offset = 0;
+
+ if ((state->bufcnt + rctx->total < state->blocklen)) {
+ stm32_hash_append_sg(rctx);
+ return 0;
+ }
+ } else { /* DMA mode */
+ if (state->bufcnt + req->nbytes <= state->blocklen) {
+ scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
+ 0, req->nbytes, 0);
+ state->bufcnt += req->nbytes;
+ return 0;
+ }
}
return stm32_hash_enqueue(req, HASH_OP_UPDATE);
@@ -1098,20 +1431,18 @@ static int stm32_hash_final(struct ahash_request *req)
static int stm32_hash_finup(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_state *state = &rctx->state;
if (!req->nbytes)
goto out;
state->flags |= HASH_FLAGS_FINUP;
- rctx->total = req->nbytes;
- rctx->sg = req->src;
- rctx->offset = 0;
- if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
- state->flags &= ~HASH_FLAGS_CPU;
+ if ((state->flags & HASH_FLAGS_CPU)) {
+ rctx->total = req->nbytes;
+ rctx->sg = req->src;
+ rctx->offset = 0;
+ }
out:
return stm32_hash_final(req);
@@ -1215,7 +1546,6 @@ static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
HASH_FLAGS_HMAC);
}
-
static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
{
struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1228,14 +1558,9 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
struct stm32_hash_dev *hdev = dev_id;
- if (HASH_FLAGS_CPU & hdev->flags) {
- if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
- hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
- goto finish;
- }
- } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
- hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
- goto finish;
+ if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
+ goto finish;
}
return IRQ_HANDLED;
@@ -1984,6 +2309,7 @@ static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
.has_sr = true,
.has_mdmat = true,
+ .context_secured = true,
};
static const struct of_device_id stm32_hash_of_match[] = {
diff --git a/drivers/crypto/tegra/Makefile b/drivers/crypto/tegra/Makefile
new file mode 100644
index 000000000000..a32001e58eb2
--- /dev/null
+++ b/drivers/crypto/tegra/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+tegra-se-objs := tegra-se-key.o tegra-se-main.o
+
+tegra-se-y += tegra-se-aes.o
+tegra-se-y += tegra-se-hash.o
+
+obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra-se.o
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
new file mode 100644
index 000000000000..ae7a0f8435fc
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -0,0 +1,1933 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include "tegra-se.h"
+
+struct tegra_aes_ctx {
+ struct tegra_se *se;
+ u32 alg;
+ u32 ivsize;
+ u32 key1_id;
+ u32 key2_id;
+};
+
+struct tegra_aes_reqctx {
+ struct tegra_se_datbuf datbuf;
+ bool encrypt;
+ u32 config;
+ u32 crypto_config;
+ u32 len;
+ u32 *iv;
+};
+
+struct tegra_aead_ctx {
+ struct tegra_se *se;
+ unsigned int authsize;
+ u32 alg;
+ u32 keylen;
+ u32 key_id;
+};
+
+struct tegra_aead_reqctx {
+ struct tegra_se_datbuf inbuf;
+ struct tegra_se_datbuf outbuf;
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+ unsigned int assoclen;
+ unsigned int cryptlen;
+ unsigned int authsize;
+ bool encrypt;
+ u32 config;
+ u32 crypto_config;
+ u32 key_id;
+ u32 iv[4];
+ u8 authdata[16];
+};
+
+struct tegra_cmac_ctx {
+ struct tegra_se *se;
+ unsigned int alg;
+ u32 key_id;
+ struct crypto_shash *fallback_tfm;
+};
+
+struct tegra_cmac_reqctx {
+ struct scatterlist *src_sg;
+ struct tegra_se_datbuf datbuf;
+ struct tegra_se_datbuf residue;
+ unsigned int total_len;
+ unsigned int blk_size;
+ unsigned int task;
+ u32 crypto_config;
+ u32 config;
+ u32 key_id;
+ u32 *iv;
+ u32 result[CMAC_RESULT_REG_COUNT];
+};
+
+/* increment counter (128-bit int) */
+static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
+{
+ do {
+ --bits;
+ nums += counter[bits];
+ counter[bits] = nums & 0xff;
+ nums >>= 8;
+ } while (bits && nums);
+}
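
ctr_iv_inc() treats the counter as a big-endian integer; note that despite its name, the second argument is the counter width in bytes. Two illustrative calls, with values assumed:

u8 iv[16] = { [15] = 0xff };	/* ... 00 00 ff */

ctr_iv_inc(iv, 16, 1);	/* carry into byte 14: ... 00 01 00 */
ctr_iv_inc(iv, 16, 2);	/* ... 00 01 02 */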
+
+static void tegra_cbc_iv_copyback(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
+{
+ struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
+ unsigned int offset;
+
+ offset = req->cryptlen - ctx->ivsize;
+
+ if (rctx->encrypt)
+ memcpy(req->iv, rctx->datbuf.buf + offset, ctx->ivsize);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src, offset, ctx->ivsize, 0);
+}
+
+static void tegra_aes_update_iv(struct skcipher_request *req, struct tegra_aes_ctx *ctx)
+{
+ int num;
+
+ if (ctx->alg == SE_ALG_CBC) {
+ tegra_cbc_iv_copyback(req, ctx);
+ } else if (ctx->alg == SE_ALG_CTR) {
+ num = req->cryptlen / ctx->ivsize;
+ if (req->cryptlen % ctx->ivsize)
+ num++;
+
+ ctr_iv_inc(req->iv, ctx->ivsize, num);
+ }
+}
+
+static int tegra234_aes_crypto_cfg(u32 alg, bool encrypt)
+{
+ switch (alg) {
+ case SE_ALG_CMAC:
+ case SE_ALG_GMAC:
+ case SE_ALG_GCM:
+ case SE_ALG_GCM_FINAL:
+ return 0;
+ case SE_ALG_CBC:
+ if (encrypt)
+ return SE_CRYPTO_CFG_CBC_ENCRYPT;
+ else
+ return SE_CRYPTO_CFG_CBC_DECRYPT;
+ case SE_ALG_ECB:
+ if (encrypt)
+ return SE_CRYPTO_CFG_ECB_ENCRYPT;
+ else
+ return SE_CRYPTO_CFG_ECB_DECRYPT;
+ case SE_ALG_XTS:
+ if (encrypt)
+ return SE_CRYPTO_CFG_XTS_ENCRYPT;
+ else
+ return SE_CRYPTO_CFG_XTS_DECRYPT;
+
+ case SE_ALG_CTR:
+ return SE_CRYPTO_CFG_CTR;
+ case SE_ALG_CBC_MAC:
+ return SE_CRYPTO_CFG_CBC_MAC;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra234_aes_cfg(u32 alg, bool encrypt)
+{
+ switch (alg) {
+ case SE_ALG_CBC:
+ case SE_ALG_ECB:
+ case SE_ALG_XTS:
+ case SE_ALG_CTR:
+ if (encrypt)
+ return SE_CFG_AES_ENCRYPT;
+ else
+ return SE_CFG_AES_DECRYPT;
+
+ case SE_ALG_GMAC:
+ if (encrypt)
+ return SE_CFG_GMAC_ENCRYPT;
+ else
+ return SE_CFG_GMAC_DECRYPT;
+
+ case SE_ALG_GCM:
+ if (encrypt)
+ return SE_CFG_GCM_ENCRYPT;
+ else
+ return SE_CFG_GCM_DECRYPT;
+
+ case SE_ALG_GCM_FINAL:
+ if (encrypt)
+ return SE_CFG_GCM_FINAL_ENCRYPT;
+ else
+ return SE_CFG_GCM_FINAL_DECRYPT;
+
+ case SE_ALG_CMAC:
+ return SE_CFG_CMAC;
+
+ case SE_ALG_CBC_MAC:
+ return SE_AES_ENC_ALG_AES_ENC |
+ SE_AES_DST_HASH_REG;
+ }
+ return -EINVAL;
+}
+
+static unsigned int tegra_aes_prep_cmd(struct tegra_aes_ctx *ctx,
+ struct tegra_aes_reqctx *rctx)
+{
+ unsigned int data_count, res_bits, i = 0, j;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+ dma_addr_t addr = rctx->datbuf.addr;
+
+ data_count = rctx->len / AES_BLOCK_SIZE;
+ res_bits = (rctx->len % AES_BLOCK_SIZE) * 8;
+
+ /*
+ * Hardware processes data_count + 1 blocks.
+ * Subtract one block if there is no residue.
+ */
+ if (!res_bits)
+ data_count--;
+
+ if (rctx->iv) {
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = rctx->iv[j];
+ }
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
+ SE_LAST_BLOCK_RES_BITS(res_bits);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
+ /* Source address setting */
+ cpuvaddr[i++] = lower_32_bits(addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) | SE_ADDR_HI_SZ(rctx->len);
+
+ /* Destination address setting */
+ cpuvaddr[i++] = lower_32_bits(addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(addr)) |
+ SE_ADDR_HI_SZ(rctx->len);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
+ SE_AES_OP_START;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
+
+ return i;
+}
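
To make the LAST_BLOCK encoding above concrete (values assumed): rctx->len = 40 yields data_count = 40 / 16 = 2 and res_bits = (40 % 16) * 8 = 64, i.e. "process 2 + 1 blocks, the last one 64 bits wide". An exact multiple such as rctx->len = 32 yields res_bits = 0, so data_count is decremented to 1 because the engine always processes data_count + 1 blocks.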
+
+static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct tegra_se *se = ctx->se;
+ unsigned int cmdlen;
+ int ret;
+
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ return -ENOMEM;
+
+ rctx->datbuf.size = SE_AES_BUFLEN;
+ rctx->iv = (u32 *)req->iv;
+ rctx->len = req->cryptlen;
+
+ /* Pad input to AES Block size */
+ if (ctx->alg != SE_ALG_XTS) {
+ if (rctx->len % AES_BLOCK_SIZE)
+ rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
+ }
+
+ scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
+
+ /* Prepare the command and submit for execution */
+ cmdlen = tegra_aes_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+
+ /* Copy the result */
+ tegra_aes_update_iv(req, ctx);
+ scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
+
+ /* Free the buffer */
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+
+ crypto_finalize_skcipher_request(se->engine, req, ret);
+
+ return 0;
+}
+
+static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
+{
+ struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct tegra_se_alg *se_alg;
+ const char *algname;
+ int ret;
+
+ se_alg = container_of(alg, struct tegra_se_alg, alg.skcipher.base);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct tegra_aes_reqctx));
+
+ ctx->ivsize = crypto_skcipher_ivsize(tfm);
+ ctx->se = se_alg->se_dev;
+ ctx->key1_id = 0;
+ ctx->key2_id = 0;
+
+ algname = crypto_tfm_alg_name(&tfm->base);
+ ret = se_algname_to_algid(algname);
+ if (ret < 0) {
+ dev_err(ctx->se->dev, "invalid algorithm\n");
+ return ret;
+ }
+
+ ctx->alg = ret;
+
+ return 0;
+}
+
+static void tegra_aes_cra_exit(struct crypto_skcipher *tfm)
+{
+ struct tegra_aes_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+
+ if (ctx->key1_id)
+ tegra_key_invalidate(ctx->se, ctx->key1_id, ctx->alg);
+
+ if (ctx->key2_id)
+ tegra_key_invalidate(ctx->se, ctx->key2_id, ctx->alg);
+}
+
+static int tegra_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (aes_check_keylen(keylen)) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+ return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+}
+
+static int tegra_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 len = keylen / 2;
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret || aes_check_keylen(len)) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+ ret = tegra_key_submit(ctx->se, key, len,
+ ctx->alg, &ctx->key1_id);
+ if (ret)
+ return ret;
+
+ return tegra_key_submit(ctx->se, key + len, len,
+ ctx->alg, &ctx->key2_id);
+}
+
+static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
+{
+ int manifest;
+
+ manifest = SE_KAC_USER_NS;
+
+ switch (alg) {
+ case SE_ALG_CBC:
+ case SE_ALG_ECB:
+ case SE_ALG_CTR:
+ manifest |= SE_KAC_ENC;
+ break;
+ case SE_ALG_XTS:
+ manifest |= SE_KAC_XTS;
+ break;
+ case SE_ALG_GCM:
+ manifest |= SE_KAC_GCM;
+ break;
+ case SE_ALG_CMAC:
+ manifest |= SE_KAC_CMAC;
+ break;
+ case SE_ALG_CBC_MAC:
+ manifest |= SE_KAC_ENC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ manifest |= SE_KAC_SIZE_128;
+ break;
+ case AES_KEYSIZE_192:
+ manifest |= SE_KAC_SIZE_192;
+ break;
+ case AES_KEYSIZE_256:
+ manifest |= SE_KAC_SIZE_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return manifest;
+}
+
+static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
+
+{
+ struct crypto_skcipher *tfm;
+ struct tegra_aes_ctx *ctx;
+ struct tegra_aes_reqctx *rctx;
+
+ tfm = crypto_skcipher_reqtfm(req);
+ ctx = crypto_skcipher_ctx(tfm);
+ rctx = skcipher_request_ctx(req);
+
+ if (ctx->alg != SE_ALG_XTS) {
+ if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(tfm))) {
+ dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
+ return -EINVAL;
+ }
+ } else if (req->cryptlen < XTS_BLOCK_SIZE) {
+ dev_dbg(ctx->se->dev, "invalid length (%d)", req->cryptlen);
+ return -EINVAL;
+ }
+
+ if (!req->cryptlen)
+ return 0;
+
+ rctx->encrypt = encrypt;
+ rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
+ rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
+
+ if (ctx->key2_id)
+ rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
+
+ return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_aes_encrypt(struct skcipher_request *req)
+{
+ return tegra_aes_crypt(req, true);
+}
+
+static int tegra_aes_decrypt(struct skcipher_request *req)
+{
+ return tegra_aes_crypt(req, false);
+}
+
+static struct tegra_se_alg tegra_aes_algs[] = {
+ {
+ .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
+ .alg.skcipher.base = {
+ .init = tegra_aes_cra_init,
+ .exit = tegra_aes_cra_exit,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_encrypt,
+ .decrypt = tegra_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-tegra",
+ .cra_priority = 500,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }, {
+ .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
+ .alg.skcipher.base = {
+ .init = tegra_aes_cra_init,
+ .exit = tegra_aes_cra_exit,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_encrypt,
+ .decrypt = tegra_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-tegra",
+ .cra_priority = 500,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }, {
+ .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
+ .alg.skcipher.base = {
+ .init = tegra_aes_cra_init,
+ .exit = tegra_aes_cra_exit,
+ .setkey = tegra_aes_setkey,
+ .encrypt = tegra_aes_encrypt,
+ .decrypt = tegra_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-tegra",
+ .cra_priority = 500,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }, {
+ .alg.skcipher.op.do_one_request = tegra_aes_do_one_req,
+ .alg.skcipher.base = {
+ .init = tegra_aes_cra_init,
+ .exit = tegra_aes_cra_exit,
+ .setkey = tegra_xts_setkey,
+ .encrypt = tegra_aes_encrypt,
+ .decrypt = tegra_aes_decrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-tegra",
+ .cra_priority = 500,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_aes_ctx),
+ .cra_alignmask = (__alignof__(u64) - 1),
+ .cra_module = THIS_MODULE,
+ },
+ }
+ },
+};
+
+static unsigned int tegra_gmac_prep_cmd(struct tegra_aead_ctx *ctx,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int data_count, res_bits, i = 0;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+
+ data_count = (rctx->assoclen / AES_BLOCK_SIZE);
+ res_bits = (rctx->assoclen % AES_BLOCK_SIZE) * 8;
+
+ /*
+ * Hardware processes data_count + 1 blocks.
+ * Subtract one block if there is no residue.
+ */
+ if (!res_bits)
+ data_count--;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
+ SE_LAST_BLOCK_RES_BITS(res_bits);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 4);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+ cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->assoclen);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
+ SE_AES_OP_INIT | SE_AES_OP_LASTBUF |
+ SE_AES_OP_START;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ return i;
+}
+
+static unsigned int tegra_gcm_crypt_prep_cmd(struct tegra_aead_ctx *ctx,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int data_count, res_bits, i = 0, j;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr, op;
+
+ data_count = (rctx->cryptlen / AES_BLOCK_SIZE);
+ res_bits = (rctx->cryptlen % AES_BLOCK_SIZE) * 8;
+ op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
+ SE_AES_OP_LASTBUF | SE_AES_OP_START;
+
+ /* If there is no assoc data, this will be the init command. */
+ if (!rctx->assoclen)
+ op |= SE_AES_OP_INIT;
+
+ /*
+ * Hardware processes data_count + 1 blocks.
+ * Reduce 1 block if there is no residue
+ */
+ if (!res_bits)
+ data_count--;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = rctx->iv[j];
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
+ SE_LAST_BLOCK_RES_BITS(res_bits);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
+ /* Source Address */
+ cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->cryptlen);
+
+ /* Destination Address */
+ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->cryptlen);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = op;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
+ return i;
+}
+
+static int tegra_gcm_prep_final_cmd(struct tegra_se *se, u32 *cpuvaddr,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int i = 0, j;
+ u32 op;
+
+ op = SE_AES_OP_WRSTALL | SE_AES_OP_FINAL |
+ SE_AES_OP_LASTBUF | SE_AES_OP_START;
+
+ /* Set INIT for a zero-sized vector. */
+ if (!rctx->assoclen && !rctx->cryptlen)
+ op |= SE_AES_OP_INIT;
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->aad_len, 2);
+ cpuvaddr[i++] = rctx->assoclen * 8;
+ cpuvaddr[i++] = 0;
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->cryp_msg_len, 2);
+ cpuvaddr[i++] = rctx->cryptlen * 8;
+ cpuvaddr[i++] = 0;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = rctx->iv[j];
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = 0;
+
+ /* Destination Address */
+ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
+ SE_ADDR_HI_SZ(0x10); /* HW always generates 128-bit tag */
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = op;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n", rctx->config, rctx->crypto_config);
+
+ return i;
+}
+
+static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ unsigned int cmdlen;
+
+ scatterwalk_map_and_copy(rctx->inbuf.buf,
+ rctx->src_sg, 0, rctx->assoclen, 0);
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
+ cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
+
+ return tegra_se_host1x_submit(se, cmdlen);
+}
+
+static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ int cmdlen, ret;
+
+ scatterwalk_map_and_copy(rctx->inbuf.buf, rctx->src_sg,
+ rctx->assoclen, rctx->cryptlen, 0);
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
+ /* Prepare command and submit */
+ cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ if (ret)
+ return ret;
+
+ /* Copy the result */
+ scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
+ rctx->assoclen, rctx->cryptlen, 1);
+
+ return 0;
+}
+
+static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+ int cmdlen, ret, offset;
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
+ /* Prepare command and submit */
+ cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ if (ret)
+ return ret;
+
+ if (rctx->encrypt) {
+ /* Copy the result */
+ offset = rctx->assoclen + rctx->cryptlen;
+ scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
+ offset, rctx->authsize, 1);
+ }
+
+ return 0;
+}
+
+static int tegra_gcm_do_verify(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
+{
+ unsigned int offset;
+ u8 mac[16];
+
+ offset = rctx->assoclen + rctx->cryptlen;
+ scatterwalk_map_and_copy(mac, rctx->src_sg, offset, rctx->authsize, 0);
+
+ if (crypto_memneq(rctx->outbuf.buf, mac, rctx->authsize))
+ return -EBADMSG;
+
+ return 0;
+}
+
+static inline int tegra_ccm_check_iv(const u8 *iv)
+{
+ /*
+ * iv[0] gives the value of q - 1.
+ * 2 <= q <= 8 as per NIST 800-38C notation;
+ * 2 <= L <= 8, so 1 <= L' <= 7, as per RFC 3610 notation.
+ */
+ if (iv[0] < 1 || iv[0] > 7) {
+ pr_debug("ccm_check_iv failed %d\n", iv[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
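
Concretely, with illustrative values: a CCM IV built for a 12-byte nonce has q = 15 - 12 = 3 and therefore iv[0] = q - 1 = 2, which passes the check; iv[0] = 0 would imply q = 1 and is rejected because NIST 800-38C only allows 2 <= q <= 8.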
+
+static unsigned int tegra_cbcmac_prep_cmd(struct tegra_aead_ctx *ctx,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int data_count, i = 0;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+
+ data_count = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
+ cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->inbuf.size);
+
+ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
+ SE_ADDR_HI_SZ(0x10); /* HW always generates 128 bit tag */
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL |
+ SE_AES_OP_LASTBUF | SE_AES_OP_START;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ return i;
+}
+
+static unsigned int tegra_ctr_prep_cmd(struct tegra_aead_ctx *ctx,
+ struct tegra_aead_reqctx *rctx)
+{
+ unsigned int i = 0, j;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = rctx->iv[j];
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = (rctx->inbuf.size / AES_BLOCK_SIZE) - 1;
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
+ /* Source address setting */
+ cpuvaddr[i++] = lower_32_bits(rctx->inbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->inbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->inbuf.size);
+
+ /* Destination address setting */
+ cpuvaddr[i++] = lower_32_bits(rctx->outbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->outbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->inbuf.size);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_LASTBUF |
+ SE_AES_OP_START;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "cfg %#x crypto cfg %#x\n",
+ rctx->config, rctx->crypto_config);
+
+ return i;
+}
+
+static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ int cmdlen;
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
+ rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
+ /* Prepare command and submit */
+ cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
+
+ return tegra_se_host1x_submit(se, cmdlen);
+}
+
+static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
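
A worked example of the encoding above, with values assumed: msglen = 0x0102 and csize = 3 first zero the 3-byte field, then copy the low 3 bytes of cpu_to_be32(0x0102), leaving { 0x00, 0x01, 0x02 } as the big-endian Q field of the CCM B_0 block. A csize larger than 4 is clamped to 4, since the length travels in a 32-bit value.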
+
+static int tegra_ccm_format_nonce(struct tegra_aead_reqctx *rctx, u8 *nonce)
+{
+ unsigned int q, t;
+ u8 *q_ptr, *iv = (u8 *)rctx->iv;
+
+ memcpy(nonce, rctx->iv, 16);
+
+ /*** 1. Prepare Flags Octet ***/
+
+ /* Encode t (mac length) */
+ t = rctx->authsize;
+ nonce[0] |= (((t - 2) / 2) << 3);
+
+ /* Adata */
+ if (rctx->assoclen)
+ nonce[0] |= (1 << 6);
+
+ /*** Encode Q - message length ***/
+ q = iv[0] + 1;
+ q_ptr = nonce + 16 - q;
+
+ return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
+}
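
For instance, with assumed values: authsize t = 8 and a non-zero assoclen produce a flags contribution of (((8 - 2) / 2) << 3) | (1 << 6) = 0x58 OR'ed into nonce[0], matching the B_0 flags octet layout of RFC 3610.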
+
+static int tegra_ccm_format_adata(u8 *adata, unsigned int a)
+{
+ int len = 0;
+
+ /*
+ * Add control info for the associated data, per RFC 3610 and
+ * NIST Special Publication 800-38C.
+ */
+ if (a < 65280) {
+ *(__be16 *)adata = cpu_to_be16(a);
+ len = 2;
+ } else {
+ *(__be16 *)adata = cpu_to_be16(0xfffe);
+ *(__be32 *)&adata[2] = cpu_to_be32(a);
+ len = 6;
+ }
+
+ return len;
+}
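
Example, values assumed: assoclen = 24 is below 65280 (0xff00) and encodes as the 2-byte big-endian value 0x0018; only larger lengths switch to the 6-byte 0xfffe || be32(len) form.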
+
+static int tegra_ccm_add_padding(u8 *buf, unsigned int len)
+{
+ unsigned int padlen = 16 - (len % 16);
+ u8 padding[16] = {0};
+
+ if (padlen == 16)
+ return 0;
+
+ memcpy(buf, padding, padlen);
+
+ return padlen;
+}
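
Example, values assumed: len = 20 pads with 16 - (20 % 16) = 12 zero bytes up to the next AES block boundary, while a multiple of 16 adds nothing and the function returns 0.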
+
+static int tegra_ccm_format_blocks(struct tegra_aead_reqctx *rctx)
+{
+ unsigned int alen = 0, offset = 0;
+ u8 nonce[16], adata[16];
+ int ret;
+
+ ret = tegra_ccm_format_nonce(rctx, nonce);
+ if (ret)
+ return ret;
+
+ memcpy(rctx->inbuf.buf, nonce, 16);
+ offset = 16;
+
+ if (rctx->assoclen) {
+ alen = tegra_ccm_format_adata(adata, rctx->assoclen);
+ memcpy(rctx->inbuf.buf + offset, adata, alen);
+ offset += alen;
+
+ scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
+ rctx->src_sg, 0, rctx->assoclen, 0);
+
+ offset += rctx->assoclen;
+ offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset,
+ rctx->assoclen + alen);
+ }
+
+ return offset;
+}
+
+static int tegra_ccm_mac_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
+{
+ u32 result[16];
+ int i, ret;
+
+ /* Read and clear Result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ result[i] = readl(se->base + se->hw->regs->result + (i * 4));
+
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ if (rctx->encrypt) {
+ memcpy(rctx->authdata, result, rctx->authsize);
+ } else {
+ ret = crypto_memneq(rctx->authdata, result, rctx->authsize);
+ if (ret)
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+static int tegra_ccm_ctr_result(struct tegra_se *se, struct tegra_aead_reqctx *rctx)
+{
+ /* Copy result */
+ scatterwalk_map_and_copy(rctx->outbuf.buf + 16, rctx->dst_sg,
+ rctx->assoclen, rctx->cryptlen, 1);
+
+ if (rctx->encrypt)
+ scatterwalk_map_and_copy(rctx->outbuf.buf, rctx->dst_sg,
+ rctx->assoclen + rctx->cryptlen,
+ rctx->authsize, 1);
+ else
+ memcpy(rctx->authdata, rctx->outbuf.buf, rctx->authsize);
+
+ return 0;
+}
+
+static int tegra_ccm_compute_auth(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ struct scatterlist *sg;
+ int offset, ret;
+
+ offset = tegra_ccm_format_blocks(rctx);
+ if (offset < 0)
+ return -EINVAL;
+
+ /* Copy plain text to the buffer */
+ sg = rctx->encrypt ? rctx->src_sg : rctx->dst_sg;
+
+ scatterwalk_map_and_copy(rctx->inbuf.buf + offset,
+ sg, rctx->assoclen,
+ rctx->cryptlen, 0);
+ offset += rctx->cryptlen;
+ offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
+
+ rctx->inbuf.size = offset;
+
+ ret = tegra_ccm_do_cbcmac(ctx, rctx);
+ if (ret)
+ return ret;
+
+ return tegra_ccm_mac_result(se, rctx);
+}
+
+static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
+{
+ struct tegra_se *se = ctx->se;
+ unsigned int cmdlen, offset = 0;
+ struct scatterlist *sg = rctx->src_sg;
+ int ret;
+
+ rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
+ SE_AES_KEY_INDEX(ctx->key_id);
+
+ /* Copy authdata to the top of the buffer for encryption/decryption */
+ if (rctx->encrypt)
+ memcpy(rctx->inbuf.buf, rctx->authdata, rctx->authsize);
+ else
+ scatterwalk_map_and_copy(rctx->inbuf.buf, sg,
+ rctx->assoclen + rctx->cryptlen,
+ rctx->authsize, 0);
+
+ offset += rctx->authsize;
+ offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->authsize);
+
+ /* If there is no cryptlen, proceed to submit the task */
+ if (rctx->cryptlen) {
+ scatterwalk_map_and_copy(rctx->inbuf.buf + offset, sg,
+ rctx->assoclen, rctx->cryptlen, 0);
+ offset += rctx->cryptlen;
+ offset += tegra_ccm_add_padding(rctx->inbuf.buf + offset, rctx->cryptlen);
+ }
+
+ rctx->inbuf.size = offset;
+
+ /* Prepare command and submit */
+ cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ if (ret)
+ return ret;
+
+ return tegra_ccm_ctr_result(se, rctx);
+}
+
+static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
+ struct tegra_aead_reqctx *rctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ u8 *iv = (u8 *)rctx->iv;
+ int ret, i;
+
+ rctx->src_sg = req->src;
+ rctx->dst_sg = req->dst;
+ rctx->assoclen = req->assoclen;
+ rctx->authsize = crypto_aead_authsize(tfm);
+
+ memcpy(iv, req->iv, 16);
+
+ ret = tegra_ccm_check_iv(iv);
+ if (ret)
+ return ret;
+
+	/*
+	 * Note: RFC 3610 and NIST 800-38C require a counter (ctr_0) of
+	 * zero to encrypt the auth tag.
+	 * req->iv has the formatted ctr_0 (i.e. Flags || N || 0).
+	 */
+ memset(iv + 15 - iv[0], 0, iv[0] + 1);
+
+ /* Clear any previous result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ return 0;
+}
+
+static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request, base);
+ struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int ret;
+
+ /* Allocate buffers required */
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ &rctx->inbuf.addr, GFP_KERNEL);
+ if (!rctx->inbuf.buf)
+ return -ENOMEM;
+
+ rctx->inbuf.size = SE_AES_BUFLEN;
+
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ &rctx->outbuf.addr, GFP_KERNEL);
+ if (!rctx->outbuf.buf) {
+ ret = -ENOMEM;
+ goto outbuf_err;
+ }
+
+ rctx->outbuf.size = SE_AES_BUFLEN;
+
+ ret = tegra_ccm_crypt_init(req, se, rctx);
+ if (ret)
+ goto out;
+
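+	/*
+	 * CCM computes the CBC-MAC over the formatted plaintext blocks
+	 * before running CTR on encryption; decryption runs CTR first
+	 * and then verifies the MAC over the recovered plaintext.
+	 */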
+ if (rctx->encrypt) {
+ rctx->cryptlen = req->cryptlen;
+
+ /* CBC MAC Operation */
+ ret = tegra_ccm_compute_auth(ctx, rctx);
+ if (ret)
+ goto out;
+
+ /* CTR operation */
+ ret = tegra_ccm_do_ctr(ctx, rctx);
+ if (ret)
+ goto out;
+	} else {
+		rctx->cryptlen = req->cryptlen - ctx->authsize;
+
+ /* CTR operation */
+ ret = tegra_ccm_do_ctr(ctx, rctx);
+ if (ret)
+ goto out;
+
+ /* CBC MAC Operation */
+ ret = tegra_ccm_compute_auth(ctx, rctx);
+ if (ret)
+ goto out;
+ }
+
+out:
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->outbuf.buf, rctx->outbuf.addr);
+
+outbuf_err:
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->inbuf.buf, rctx->inbuf.addr);
+
+ crypto_finalize_aead_request(ctx->se->engine, req, ret);
+
+ return 0;
+}
+
+static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request, base);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
+ int ret;
+
+ /* Allocate buffers required */
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ &rctx->inbuf.addr, GFP_KERNEL);
+ if (!rctx->inbuf.buf)
+ return -ENOMEM;
+
+ rctx->inbuf.size = SE_AES_BUFLEN;
+
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ &rctx->outbuf.addr, GFP_KERNEL);
+ if (!rctx->outbuf.buf) {
+ ret = -ENOMEM;
+ goto outbuf_err;
+ }
+
+ rctx->outbuf.size = SE_AES_BUFLEN;
+
+ rctx->src_sg = req->src;
+ rctx->dst_sg = req->dst;
+ rctx->assoclen = req->assoclen;
+ rctx->authsize = crypto_aead_authsize(tfm);
+
+ if (rctx->encrypt)
+ rctx->cryptlen = req->cryptlen;
+ else
+ rctx->cryptlen = req->cryptlen - ctx->authsize;
+
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
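+	/* J0 = IV || 0^31 || 1: set the big-endian counter field to 1 */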
+ rctx->iv[3] = (1 << 24);
+
+ /* If there is associated data perform GMAC operation */
+ if (rctx->assoclen) {
+ ret = tegra_gcm_do_gmac(ctx, rctx);
+ if (ret)
+ goto out;
+ }
+
+ /* GCM Encryption/Decryption operation */
+ if (rctx->cryptlen) {
+ ret = tegra_gcm_do_crypt(ctx, rctx);
+ if (ret)
+ goto out;
+ }
+
+ /* GCM_FINAL operation */
+ ret = tegra_gcm_do_final(ctx, rctx);
+ if (ret)
+ goto out;
+
+ if (!rctx->encrypt)
+ ret = tegra_gcm_do_verify(ctx->se, rctx);
+
+out:
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->outbuf.buf, rctx->outbuf.addr);
+
+outbuf_err:
+ dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->inbuf.buf, rctx->inbuf.addr);
+
+	/* Finalize the request with the status of the operation */
+ crypto_finalize_aead_request(ctx->se->engine, req, ret);
+
+ return 0;
+}
+
+static int tegra_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct tegra_se_alg *se_alg;
+ const char *algname;
+ int ret;
+
+ algname = crypto_tfm_alg_name(&tfm->base);
+
+ se_alg = container_of(alg, struct tegra_se_alg, alg.aead.base);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct tegra_aead_reqctx));
+
+ ctx->se = se_alg->se_dev;
+ ctx->key_id = 0;
+
+ ret = se_algname_to_algid(algname);
+ if (ret < 0) {
+ dev_err(ctx->se->dev, "invalid algorithm\n");
+ return ret;
+ }
+
+ ctx->alg = ret;
+
+ return 0;
+}
+
+static int tegra_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->authsize = authsize;
+
+ return 0;
+}
+
+static int tegra_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = crypto_gcm_check_authsize(authsize);
+ if (ret)
+ return ret;
+
+ ctx->authsize = authsize;
+
+ return 0;
+}
+
+static void tegra_aead_cra_exit(struct crypto_aead *tfm)
+{
+	struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (ctx->key_id)
+ tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
+}
+
+static int tegra_aead_crypt(struct aead_request *req, bool encrypt)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
+
+ rctx->encrypt = encrypt;
+
+ return crypto_transfer_aead_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_aead_encrypt(struct aead_request *req)
+{
+ return tegra_aead_crypt(req, true);
+}
+
+static int tegra_aead_decrypt(struct aead_request *req)
+{
+ return tegra_aead_crypt(req, false);
+}
+
+static int tegra_aead_setkey(struct crypto_aead *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (aes_check_keylen(keylen)) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+ return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+}
+
+static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
+ struct tegra_cmac_reqctx *rctx)
+{
+ unsigned int data_count, res_bits = 0, i = 0, j;
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr, op;
+
+ data_count = (rctx->datbuf.size / AES_BLOCK_SIZE);
+
+ op = SE_AES_OP_WRSTALL | SE_AES_OP_START | SE_AES_OP_LASTBUF;
+
+ if (!(rctx->task & SHA_UPDATE)) {
+ op |= SE_AES_OP_FINAL;
+ res_bits = (rctx->datbuf.size % AES_BLOCK_SIZE) * 8;
+ }
+
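+	/*
+	 * The LAST_BLOCK register takes a zero-based block count when the
+	 * data ends on a block boundary; residual bits otherwise mark the
+	 * trailing partial block.
+	 */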
+ if (!res_bits && data_count)
+ data_count--;
+
+ if (rctx->task & SHA_FIRST) {
+ rctx->task &= ~SHA_FIRST;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(SE_CRYPTO_CTR_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->linear_ctr);
+ /* Load 0 IV */
+ for (j = 0; j < SE_CRYPTO_CTR_REG_COUNT; j++)
+ cpuvaddr[i++] = 0;
+ }
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->last_blk, 1);
+ cpuvaddr[i++] = SE_LAST_BLOCK_VAL(data_count) |
+ SE_LAST_BLOCK_RES_BITS(res_bits);
+
+ cpuvaddr[i++] = se_host1x_opcode_incr(se->hw->regs->config, 6);
+ cpuvaddr[i++] = rctx->config;
+ cpuvaddr[i++] = rctx->crypto_config;
+
+ /* Source Address */
+ cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
+ cpuvaddr[i++] = SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->datbuf.size);
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = SE_ADDR_HI_SZ(AES_BLOCK_SIZE);
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(se->hw->regs->op, 1);
+ cpuvaddr[i++] = op;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ return i;
+}
+
+static void tegra_cmac_copy_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
+{
+ int i;
+
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
+}
+
+static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqctx *rctx)
+{
+ int i;
+
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(rctx->result[i],
+ se->base + se->hw->regs->result + (i * 4));
+}
+
+static int tegra_cmac_do_update(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ unsigned int nblks, nresidue, cmdlen;
+ int ret;
+
+ if (!req->nbytes)
+ return 0;
+
+ nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
+ nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
+
+	/*
+	 * If nbytes is a multiple of the block size, reserve the last
+	 * block as residue so that final() has data to process.
+	 */
+ if (!nresidue && nblks) {
+ nresidue += rctx->blk_size;
+ nblks--;
+ }
+
+ rctx->src_sg = req->src;
+ rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
+ rctx->total_len += rctx->datbuf.size;
+ rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+ rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
+
+	/*
+	 * If the data is less than a block, keep it in the residue
+	 * buffer and return. The bytes are processed in final().
+	 */
+ if (nblks < 1) {
+ scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
+ rctx->src_sg, 0, req->nbytes, 0);
+
+ rctx->residue.size += req->nbytes;
+ return 0;
+ }
+
+ /* Copy the previous residue first */
+ if (rctx->residue.size)
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+
+ scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
+ rctx->src_sg, 0, req->nbytes - nresidue, 0);
+
+ scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
+ req->nbytes - nresidue, nresidue, 0);
+
+ /* Update residue value with the residue after current block */
+ rctx->residue.size = nresidue;
+
+ /*
+ * If this is not the first 'update' call, paste the previous copied
+ * intermediate results to the registers so that it gets picked up.
+ * This is to support the import/export functionality.
+ */
+ if (!(rctx->task & SHA_FIRST))
+ tegra_cmac_paste_result(ctx->se, rctx);
+
+ cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
+
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ /*
+ * If this is not the final update, copy the intermediate results
+ * from the registers so that it can be used in the next 'update'
+ * call. This is to support the import/export functionality.
+ */
+ if (!(rctx->task & SHA_FINAL))
+ tegra_cmac_copy_result(ctx->se, rctx);
+
+ return ret;
+}
+
+static int tegra_cmac_do_final(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ u32 *result = (u32 *)req->result;
+ int ret = 0, i, cmdlen;
+
+	/* Use the software fallback for a zero-length message */
+	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
+		ret = crypto_shash_tfm_digest(ctx->fallback_tfm,
+					      rctx->datbuf.buf, 0, req->result);
+		goto out;
+	}
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ rctx->datbuf.size = rctx->residue.size;
+ rctx->total_len += rctx->residue.size;
+ rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+
+ /* Prepare command and submit */
+ cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, cmdlen);
+ if (ret)
+ goto out;
+
+ /* Read and clear Result register */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ result[i] = readl(se->base + se->hw->regs->result + (i * 4));
+
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+out:
+ dma_free_coherent(se->dev, SE_SHA_BUFLEN,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+ dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
+ rctx->residue.buf, rctx->residue.addr);
+ return ret;
+}
+
+static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+	int ret = 0;
+
+ if (rctx->task & SHA_UPDATE) {
+ ret = tegra_cmac_do_update(req);
+ rctx->task &= ~SHA_UPDATE;
+ }
+
+ if (rctx->task & SHA_FINAL) {
+ ret = tegra_cmac_do_final(req);
+ rctx->task &= ~SHA_FINAL;
+ }
+
+ crypto_finalize_hash_request(se->engine, req, ret);
+
+ return 0;
+}
+
+static void tegra_cmac_init_fallback(struct crypto_ahash *tfm, struct tegra_cmac_ctx *ctx,
+ const char *algname)
+{
+ unsigned int statesize;
+
+ ctx->fallback_tfm = crypto_alloc_shash(algname, 0, CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_warn(ctx->se->dev, "failed to allocate fallback for %s\n", algname);
+ ctx->fallback_tfm = NULL;
+ return;
+ }
+
+ statesize = crypto_shash_statesize(ctx->fallback_tfm);
+
+ if (statesize > sizeof(struct tegra_cmac_reqctx))
+ crypto_ahash_set_statesize(tfm, statesize);
+}
+
+static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct tegra_se_alg *se_alg;
+ const char *algname;
+ int ret;
+
+ algname = crypto_tfm_alg_name(tfm);
+ se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
+
+ crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_cmac_reqctx));
+
+ ctx->se = se_alg->se_dev;
+ ctx->key_id = 0;
+
+ ret = se_algname_to_algid(algname);
+ if (ret < 0) {
+ dev_err(ctx->se->dev, "invalid algorithm\n");
+ return ret;
+ }
+
+ ctx->alg = ret;
+
+ tegra_cmac_init_fallback(ahash_tfm, ctx, algname);
+
+ return 0;
+}
+
+static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_cmac_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback_tfm)
+ crypto_free_shash(ctx->fallback_tfm);
+
+ tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
+}
+
+static int tegra_cmac_init(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int i;
+
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->task = SHA_FIRST;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ goto resbuf_fail;
+
+ rctx->residue.size = 0;
+
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ goto datbuf_fail;
+
+ rctx->datbuf.size = 0;
+
+ /* Clear any previous result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ return 0;
+
+datbuf_fail:
+	dma_free_coherent(se->dev, rctx->blk_size * 2, rctx->residue.buf,
+			  rctx->residue.addr);
+resbuf_fail:
+ return -ENOMEM;
+}
+
+static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;
+
+ if (aes_check_keylen(keylen)) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+	if (ctx->fallback_tfm) {
+		ret = crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
+		if (ret)
+			return ret;
+	}
+
+ return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+}
+
+static int tegra_cmac_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task |= SHA_UPDATE;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_cmac_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task |= SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_cmac_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_cmac_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+	int ret;
+
+	ret = tegra_cmac_init(req);
+	if (ret)
+		return ret;
+
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_cmac_export(struct ahash_request *req, void *out)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int tegra_cmac_import(struct ahash_request *req, const void *in)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static struct tegra_se_alg tegra_aead_algs[] = {
+ {
+ .alg.aead.op.do_one_request = tegra_gcm_do_one_req,
+ .alg.aead.base = {
+ .init = tegra_aead_cra_init,
+ .exit = tegra_aead_cra_exit,
+ .setkey = tegra_aead_setkey,
+ .setauthsize = tegra_gcm_setauthsize,
+ .encrypt = tegra_aead_encrypt,
+ .decrypt = tegra_aead_decrypt,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_AES_IV_SIZE,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-tegra",
+ .cra_priority = 500,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct tegra_aead_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }, {
+ .alg.aead.op.do_one_request = tegra_ccm_do_one_req,
+ .alg.aead.base = {
+ .init = tegra_aead_cra_init,
+ .exit = tegra_aead_cra_exit,
+ .setkey = tegra_aead_setkey,
+ .setauthsize = tegra_ccm_setauthsize,
+ .encrypt = tegra_aead_encrypt,
+ .decrypt = tegra_aead_decrypt,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-tegra",
+ .cra_priority = 500,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct tegra_aead_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ }
+ }
+};
+
+static struct tegra_se_alg tegra_cmac_algs[] = {
+ {
+ .alg.ahash.op.do_one_request = tegra_cmac_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_cmac_init,
+ .setkey = tegra_cmac_setkey,
+ .update = tegra_cmac_update,
+ .final = tegra_cmac_final,
+ .finup = tegra_cmac_finup,
+ .digest = tegra_cmac_digest,
+ .export = tegra_cmac_export,
+ .import = tegra_cmac_import,
+ .halg.digestsize = AES_BLOCK_SIZE,
+ .halg.statesize = sizeof(struct tegra_cmac_reqctx),
+ .halg.base = {
+ .cra_name = "cmac(aes)",
+ .cra_driver_name = "tegra-se-cmac",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_cmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_cmac_cra_init,
+ .cra_exit = tegra_cmac_cra_exit,
+ }
+ }
+ }
+};
+
+int tegra_init_aes(struct tegra_se *se)
+{
+ struct aead_engine_alg *aead_alg;
+ struct ahash_engine_alg *ahash_alg;
+ struct skcipher_engine_alg *sk_alg;
+ int i, ret;
+
+ se->manifest = tegra_aes_kac_manifest;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++) {
+ sk_alg = &tegra_aes_algs[i].alg.skcipher;
+ tegra_aes_algs[i].se_dev = se;
+
+ ret = crypto_engine_register_skcipher(sk_alg);
+ if (ret) {
+ dev_err(se->dev, "failed to register %s\n",
+ sk_alg->base.base.cra_name);
+ goto err_aes;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++) {
+ aead_alg = &tegra_aead_algs[i].alg.aead;
+ tegra_aead_algs[i].se_dev = se;
+
+ ret = crypto_engine_register_aead(aead_alg);
+ if (ret) {
+ dev_err(se->dev, "failed to register %s\n",
+ aead_alg->base.base.cra_name);
+ goto err_aead;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++) {
+ ahash_alg = &tegra_cmac_algs[i].alg.ahash;
+ tegra_cmac_algs[i].se_dev = se;
+
+ ret = crypto_engine_register_ahash(ahash_alg);
+ if (ret) {
+ dev_err(se->dev, "failed to register %s\n",
+ ahash_alg->base.halg.base.cra_name);
+ goto err_cmac;
+ }
+ }
+
+ return 0;
+
+err_cmac:
+ while (i--)
+ crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
+
+ i = ARRAY_SIZE(tegra_aead_algs);
+err_aead:
+ while (i--)
+ crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
+
+ i = ARRAY_SIZE(tegra_aes_algs);
+err_aes:
+ while (i--)
+ crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
+
+ return ret;
+}
+
+void tegra_deinit_aes(struct tegra_se *se)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_aes_algs); i++)
+ crypto_engine_unregister_skcipher(&tegra_aes_algs[i].alg.skcipher);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_aead_algs); i++)
+ crypto_engine_unregister_aead(&tegra_aead_algs[i].alg.aead);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_cmac_algs); i++)
+ crypto_engine_unregister_ahash(&tegra_cmac_algs[i].alg.ahash);
+}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
new file mode 100644
index 000000000000..4d4bd727f498
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * Crypto driver to handle HASH algorithms using NVIDIA Security Engine.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <crypto/aes.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sha3.h>
+#include <crypto/internal/des.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "tegra-se.h"
+
+struct tegra_sha_ctx {
+ struct tegra_se *se;
+ unsigned int alg;
+ bool fallback;
+ u32 key_id;
+ struct crypto_ahash *fallback_tfm;
+};
+
+struct tegra_sha_reqctx {
+ struct scatterlist *src_sg;
+ struct tegra_se_datbuf datbuf;
+ struct tegra_se_datbuf residue;
+ struct tegra_se_datbuf digest;
+ unsigned int alg;
+ unsigned int config;
+ unsigned int total_len;
+ unsigned int blk_size;
+ unsigned int task;
+ u32 key_id;
+ u32 result[HASH_RESULT_REG_COUNT];
+ struct ahash_request fallback_req;
+};
+
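+/*
+ * Map an SE algorithm ID to the SE_SHA_CFG mode bits. The HMAC variants
+ * set the HMAC engine bit and fall through to the matching SHA-2 mode.
+ */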
+static int tegra_sha_get_config(u32 alg)
+{
+ int cfg = 0;
+
+ switch (alg) {
+ case SE_ALG_SHA1:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA1;
+ break;
+
+ case SE_ALG_HMAC_SHA224:
+ cfg |= SE_SHA_ENC_ALG_HMAC;
+ fallthrough;
+ case SE_ALG_SHA224:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA224;
+ break;
+
+ case SE_ALG_HMAC_SHA256:
+ cfg |= SE_SHA_ENC_ALG_HMAC;
+ fallthrough;
+ case SE_ALG_SHA256:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA256;
+ break;
+
+ case SE_ALG_HMAC_SHA384:
+ cfg |= SE_SHA_ENC_ALG_HMAC;
+ fallthrough;
+ case SE_ALG_SHA384:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA384;
+ break;
+
+ case SE_ALG_HMAC_SHA512:
+ cfg |= SE_SHA_ENC_ALG_HMAC;
+ fallthrough;
+ case SE_ALG_SHA512:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA512;
+ break;
+
+ case SE_ALG_SHA3_224:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA3_224;
+ break;
+ case SE_ALG_SHA3_256:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA3_256;
+ break;
+ case SE_ALG_SHA3_384:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA3_384;
+ break;
+ case SE_ALG_SHA3_512:
+ cfg |= SE_SHA_ENC_ALG_SHA;
+ cfg |= SE_SHA_ENC_MODE_SHA3_512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cfg;
+}
+
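+/*
+ * Software-fallback plumbing: when ctx->fallback is set (e.g. for an
+ * HMAC key the engine cannot hold), the whole request is forwarded to
+ * the allocated fallback ahash transform.
+ */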
+static int tegra_sha_fallback_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_update(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_final(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_finup(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_digest(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+}
+
+static int tegra_sha_fallback_import(struct ahash_request *req, const void *in)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
+ struct tegra_sha_reqctx *rctx)
+{
+ u64 msg_len, msg_left;
+ int i = 0;
+
+ msg_len = rctx->total_len * 8;
+ msg_left = rctx->datbuf.size * 8;
+
+	/*
+	 * If IN_ADDR_HI_0.SZ > SHA_MSG_LEFT_[0-3], the HASH engine
+	 * treats the input as the last buffer and processes the data.
+	 * Therefore, add an extra byte to msg_left if this is not the
+	 * last buffer.
+	 */
+ if (rctx->task & SHA_UPDATE) {
+ msg_left += 8;
+ msg_len += 8;
+ }
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(8);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_MSG_LENGTH);
+ cpuvaddr[i++] = lower_32_bits(msg_len);
+ cpuvaddr[i++] = upper_32_bits(msg_len);
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = lower_32_bits(msg_left);
+ cpuvaddr[i++] = upper_32_bits(msg_left);
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = host1x_opcode_setpayload(6);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG);
+ cpuvaddr[i++] = rctx->config;
+
+ if (rctx->task & SHA_FIRST) {
+ cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT;
+ rctx->task &= ~SHA_FIRST;
+ } else {
+ cpuvaddr[i++] = 0;
+ }
+
+	cpuvaddr[i++] = lower_32_bits(rctx->datbuf.addr);
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
+ SE_ADDR_HI_SZ(rctx->datbuf.size));
+	cpuvaddr[i++] = lower_32_bits(rctx->digest.addr);
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
+ SE_ADDR_HI_SZ(rctx->digest.size));
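+	/* For HMAC, select the keyslot that holds the key */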
+ if (rctx->key_id) {
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG);
+ cpuvaddr[i++] = SE_AES_KEY_INDEX(rctx->key_id);
+ }
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION);
+ cpuvaddr[i++] = SE_SHA_OP_WRSTALL |
+ SE_SHA_OP_START |
+ SE_SHA_OP_LASTBUF;
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x",
+ msg_len, msg_left, rctx->config);
+
+ return i;
+}
+
+static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
+{
+ int i;
+
+ for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
+ rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
+}
+
+static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
+{
+ int i;
+
+ for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
+ writel(rctx->result[i],
+ se->base + se->hw->regs->result + (i * 4));
+}
+
+static int tegra_sha_do_update(struct ahash_request *req)
+{
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+	unsigned int nblks, nresidue, size;
+	int ret;
+ u32 *cpuvaddr = ctx->se->cmdbuf->addr;
+
+ nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
+ nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
+
+ /*
+ * If nbytes is a multiple of block size and there is no residue,
+ * then reserve the last block as residue during final() to process.
+ */
+ if (!nresidue && nblks) {
+ nresidue = rctx->blk_size;
+ nblks--;
+ }
+
+ rctx->src_sg = req->src;
+ rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
+ rctx->total_len += rctx->datbuf.size;
+
+	/*
+	 * If the data is less than a block, copy it to the residue
+	 * buffer and return. The bytes are processed in final().
+	 */
+ if (nblks < 1) {
+ scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
+ rctx->src_sg, 0, req->nbytes, 0);
+
+ rctx->residue.size += req->nbytes;
+ return 0;
+ }
+
+ /* Copy the previous residue first */
+ if (rctx->residue.size)
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+
+ scatterwalk_map_and_copy(rctx->datbuf.buf + rctx->residue.size,
+ rctx->src_sg, 0, req->nbytes - nresidue, 0);
+
+ scatterwalk_map_and_copy(rctx->residue.buf, rctx->src_sg,
+ req->nbytes - nresidue, nresidue, 0);
+
+ /* Update residue value with the residue after current block */
+ rctx->residue.size = nresidue;
+
+ rctx->config = tegra_sha_get_config(rctx->alg) |
+ SE_SHA_DST_HASH_REG;
+
+ /*
+ * If this is not the first 'update' call, paste the previous copied
+ * intermediate results to the registers so that it gets picked up.
+ * This is to support the import/export functionality.
+ */
+ if (!(rctx->task & SHA_FIRST))
+ tegra_sha_paste_hash_result(ctx->se, rctx);
+
+ size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
+
+ ret = tegra_se_host1x_submit(ctx->se, size);
+
+ /*
+ * If this is not the final update, copy the intermediate results
+ * from the registers so that it can be used in the next 'update'
+ * call. This is to support the import/export functionality.
+ */
+ if (!(rctx->task & SHA_FINAL))
+ tegra_sha_copy_hash_result(ctx->se, rctx);
+
+ return ret;
+}
+
+static int tegra_sha_do_final(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ u32 *cpuvaddr = se->cmdbuf->addr;
+ int size, ret = 0;
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ rctx->datbuf.size = rctx->residue.size;
+ rctx->total_len += rctx->residue.size;
+
+ rctx->config = tegra_sha_get_config(rctx->alg) |
+ SE_SHA_DST_MEMORY;
+
+ size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
+
+ ret = tegra_se_host1x_submit(se, size);
+ if (ret)
+ goto out;
+
+ /* Copy result */
+ memcpy(req->result, rctx->digest.buf, rctx->digest.size);
+
+out:
+ dma_free_coherent(se->dev, SE_SHA_BUFLEN,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+ dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
+ rctx->residue.buf, rctx->residue.addr);
+ dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+ rctx->digest.addr);
+ return ret;
+}
+
+static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int ret = 0;
+
+ if (rctx->task & SHA_UPDATE) {
+ ret = tegra_sha_do_update(req);
+ rctx->task &= ~SHA_UPDATE;
+ }
+
+ if (rctx->task & SHA_FINAL) {
+ ret = tegra_sha_do_final(req);
+ rctx->task &= ~SHA_FINAL;
+ }
+
+ crypto_finalize_hash_request(se->engine, req, ret);
+
+ return 0;
+}
+
+static void tegra_sha_init_fallback(struct crypto_ahash *tfm, struct tegra_sha_ctx *ctx,
+ const char *algname)
+{
+ unsigned int statesize;
+
+ ctx->fallback_tfm = crypto_alloc_ahash(algname, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_warn(ctx->se->dev,
+ "failed to allocate fallback for %s\n", algname);
+ ctx->fallback_tfm = NULL;
+ return;
+ }
+
+ statesize = crypto_ahash_statesize(ctx->fallback_tfm);
+
+ if (statesize > sizeof(struct tegra_sha_reqctx))
+ crypto_ahash_set_statesize(tfm, statesize);
+
+ /* Update reqsize if fallback is added */
+ crypto_ahash_set_reqsize(tfm,
+ sizeof(struct tegra_sha_reqctx) +
+ crypto_ahash_reqsize(ctx->fallback_tfm));
+}
+
+static int tegra_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct tegra_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *ahash_tfm = __crypto_ahash_cast(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct tegra_se_alg *se_alg;
+ const char *algname;
+ int ret;
+
+ algname = crypto_tfm_alg_name(tfm);
+ se_alg = container_of(alg, struct tegra_se_alg, alg.ahash.base);
+
+ crypto_ahash_set_reqsize(ahash_tfm, sizeof(struct tegra_sha_reqctx));
+
+ ctx->se = se_alg->se_dev;
+ ctx->fallback = false;
+ ctx->key_id = 0;
+
+ ret = se_algname_to_algid(algname);
+ if (ret < 0) {
+ dev_err(ctx->se->dev, "invalid algorithm\n");
+ return ret;
+ }
+
+ if (se_alg->alg_base)
+ tegra_sha_init_fallback(ahash_tfm, ctx, algname);
+
+ ctx->alg = ret;
+
+ return 0;
+}
+
+static void tegra_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct tegra_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback_tfm)
+ crypto_free_ahash(ctx->fallback_tfm);
+
+ tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
+}
+
+static int tegra_sha_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_init(req);
+
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task = SHA_FIRST;
+ rctx->alg = ctx->alg;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+ rctx->digest.size = crypto_ahash_digestsize(tfm);
+
+ rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
+ &rctx->digest.addr, GFP_KERNEL);
+ if (!rctx->digest.buf)
+ goto digbuf_fail;
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ goto resbuf_fail;
+
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ goto datbuf_fail;
+
+ return 0;
+
+datbuf_fail:
+ dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
+ rctx->residue.addr);
+resbuf_fail:
+	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+			  rctx->digest.addr);
+digbuf_fail:
+ return -ENOMEM;
+}
+
+static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
+ unsigned int keylen)
+{
+ if (!ctx->fallback_tfm) {
+ dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
+ return -EINVAL;
+ }
+
+ ctx->fallback = true;
+ return crypto_ahash_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
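+	/* Keys that are not AES-sized cannot be loaded into a keyslot */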
+ if (aes_check_keylen(keylen))
+ return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
+ ctx->fallback = false;
+
+ return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+}
+
+static int tegra_sha_update(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_update(req);
+
+ rctx->task |= SHA_UPDATE;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_sha_final(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_final(req);
+
+ rctx->task |= SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_sha_finup(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_finup(req);
+
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_sha_digest(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;
+
+	if (ctx->fallback)
+		return tegra_sha_fallback_digest(req);
+
+	ret = tegra_sha_init(req);
+	if (ret)
+		return ret;
+
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
+static int tegra_sha_export(struct ahash_request *req, void *out)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_export(req, out);
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int tegra_sha_import(struct ahash_request *req, const void *in)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_import(req, in);
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static struct tegra_se_alg tegra_hash_algs[] = {
+ {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "tegra-se-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "tegra-se-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "tegra-se-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "tegra-se-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "tegra-se-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA3_224_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha3-224",
+ .cra_driver_name = "tegra-se-sha3-224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA3_256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha3-256",
+ .cra_driver_name = "tegra-se-sha3-256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA3_384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "tegra-se-sha3-384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .halg.digestsize = SHA3_512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha3-512",
+ .cra_driver_name = "tegra-se-sha3-512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg_base = "sha224",
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .setkey = tegra_hmac_setkey,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "tegra-se-hmac-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg_base = "sha256",
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .setkey = tegra_hmac_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "tegra-se-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg_base = "sha384",
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .setkey = tegra_hmac_setkey,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "tegra-se-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }, {
+ .alg_base = "sha512",
+ .alg.ahash.op.do_one_request = tegra_sha_do_one_req,
+ .alg.ahash.base = {
+ .init = tegra_sha_init,
+ .update = tegra_sha_update,
+ .final = tegra_sha_final,
+ .finup = tegra_sha_finup,
+ .digest = tegra_sha_digest,
+ .export = tegra_sha_export,
+ .import = tegra_sha_import,
+ .setkey = tegra_hmac_setkey,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct tegra_sha_reqctx),
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "tegra-se-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct tegra_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = tegra_sha_cra_init,
+ .cra_exit = tegra_sha_cra_exit,
+ }
+ }
+ }
+};
+
+static int tegra_hash_kac_manifest(u32 user, u32 alg, u32 keylen)
+{
+ int manifest;
+
+ manifest = SE_KAC_USER_NS;
+
+ switch (alg) {
+ case SE_ALG_HMAC_SHA224:
+ case SE_ALG_HMAC_SHA256:
+ case SE_ALG_HMAC_SHA384:
+ case SE_ALG_HMAC_SHA512:
+ manifest |= SE_KAC_HMAC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ manifest |= SE_KAC_SIZE_128;
+ break;
+ case AES_KEYSIZE_192:
+ manifest |= SE_KAC_SIZE_192;
+ break;
+ case AES_KEYSIZE_256:
+ default:
+ manifest |= SE_KAC_SIZE_256;
+ break;
+ }
+
+ return manifest;
+}
+
+int tegra_init_hash(struct tegra_se *se)
+{
+ struct ahash_engine_alg *alg;
+ int i, ret;
+
+ se->manifest = tegra_hash_kac_manifest;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++) {
+ tegra_hash_algs[i].se_dev = se;
+ alg = &tegra_hash_algs[i].alg.ahash;
+
+ ret = crypto_engine_register_ahash(alg);
+ if (ret) {
+ dev_err(se->dev, "failed to register %s\n",
+ alg->base.halg.base.cra_name);
+ goto sha_err;
+ }
+ }
+
+ return 0;
+
+sha_err:
+ while (i--)
+ crypto_engine_unregister_ahash(&tegra_hash_algs[i].alg.ahash);
+
+ return ret;
+}
+
+void tegra_deinit_hash(struct tegra_se *se)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_hash_algs); i++)
+ crypto_engine_unregister_ahash(&tegra_hash_algs[i].alg.ahash);
+}
diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c
new file mode 100644
index 000000000000..ac14678dbd30
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se-key.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * Crypto driver file to manage keys of NVIDIA Security Engine.
+ */
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <crypto/aes.h>
+
+#include "tegra-se.h"
+
+#define SE_KEY_FULL_MASK GENMASK(SE_MAX_KEYSLOT, 0)
+
+/* Reserve keyslot 0, 14, 15 */
+#define SE_KEY_RSVD_MASK (BIT(0) | BIT(14) | BIT(15))
+#define SE_KEY_VALID_MASK (SE_KEY_FULL_MASK & ~SE_KEY_RSVD_MASK)
+
+/* Mutex lock to guard keyslots */
+static DEFINE_MUTEX(kslt_lock);
+
+/* Keyslot bitmask (0 = available, 1 = in use/not available) */
+static u16 tegra_se_keyslots = SE_KEY_RSVD_MASK;
+
+static u16 tegra_keyslot_alloc(void)
+{
+ u16 keyid;
+
+ mutex_lock(&kslt_lock);
+ /* Check if all key slots are full */
+	if (tegra_se_keyslots == SE_KEY_FULL_MASK) {
+ mutex_unlock(&kslt_lock);
+ return 0;
+ }
+
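+	/* ffz() returns the lowest clear bit, i.e. the first free keyslot */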
+ keyid = ffz(tegra_se_keyslots);
+ tegra_se_keyslots |= BIT(keyid);
+
+ mutex_unlock(&kslt_lock);
+
+ return keyid;
+}
+
+static void tegra_keyslot_free(u16 slot)
+{
+ mutex_lock(&kslt_lock);
+ tegra_se_keyslots &= ~(BIT(slot));
+ mutex_unlock(&kslt_lock);
+}
+
+static unsigned int tegra_key_prep_ins_cmd(struct tegra_se *se, u32 *cpuvaddr,
+ const u32 *key, u32 keylen, u16 slot, u32 alg)
+{
+ int i = 0, j;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->manifest);
+ cpuvaddr[i++] = se->manifest(se->owner, alg, keylen);
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_dst);
+
+ cpuvaddr[i++] = SE_AES_KEY_DST_INDEX(slot);
+
+ for (j = 0; j < keylen / 4; j++) {
+ /* Set key address */
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_addr);
+ cpuvaddr[i++] = j;
+
+ /* Set key data */
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_data);
+ cpuvaddr[i++] = key[j];
+ }
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->config);
+ cpuvaddr[i++] = SE_CFG_INS;
+
+ cpuvaddr[i++] = host1x_opcode_setpayload(1);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op);
+ cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_START |
+ SE_AES_OP_LASTBUF;
+
+ cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
+ cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
+ host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
+
+ dev_dbg(se->dev, "key-slot %u key-manifest %#x\n",
+ slot, se->manifest(se->owner, alg, keylen));
+
+ return i;
+}
+
+static bool tegra_key_in_kslt(u32 keyid)
+{
+ bool ret;
+
+ if (keyid > SE_MAX_KEYSLOT)
+ return false;
+
+ mutex_lock(&kslt_lock);
+ ret = ((BIT(keyid) & SE_KEY_VALID_MASK) &&
+ (BIT(keyid) & tegra_se_keyslots));
+ mutex_unlock(&kslt_lock);
+
+ return ret;
+}
+
+static int tegra_key_insert(struct tegra_se *se, const u8 *key,
+ u32 keylen, u16 slot, u32 alg)
+{
+ const u32 *keyval = (u32 *)key;
+ u32 *addr = se->cmdbuf->addr, size;
+
+ size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
+
+ return tegra_se_host1x_submit(se, size);
+}
+
+void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
+{
+ u8 zkey[AES_MAX_KEY_SIZE] = {0};
+
+ if (!keyid)
+ return;
+
+ /* Overwrite the key with 0s */
+ tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
+
+ tegra_keyslot_free(keyid);
+}
+
+int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
+{
+ /* Use the existing slot if it is already allocated */
+ if (!tegra_key_in_kslt(*keyid)) {
+ *keyid = tegra_keyslot_alloc();
+ if (!(*keyid)) {
+ dev_err(se->dev, "failed to allocate key slot\n");
+ return -ENOMEM;
+ }
+ }
+
+	return tegra_key_insert(se, key, keylen, *keyid, alg);
+}
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
new file mode 100644
index 000000000000..9955874b3dc3
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+/*
+ * Crypto driver for NVIDIA Security Engine in Tegra Chips
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+
+#include <crypto/engine.h>
+
+#include "tegra-se.h"
+
+static struct host1x_bo *tegra_se_cmdbuf_get(struct host1x_bo *host_bo)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
+
+ kref_get(&cmdbuf->ref);
+
+ return host_bo;
+}
+
+static void tegra_se_cmdbuf_release(struct kref *ref)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(ref, struct tegra_se_cmdbuf, ref);
+
+ dma_free_attrs(cmdbuf->dev, cmdbuf->size, cmdbuf->addr,
+ cmdbuf->iova, 0);
+
+ kfree(cmdbuf);
+}
+
+static void tegra_se_cmdbuf_put(struct host1x_bo *host_bo)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
+
+ kref_put(&cmdbuf->ref, tegra_se_cmdbuf_release);
+}
+
+static struct host1x_bo_mapping *
+tegra_se_cmdbuf_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(bo, struct tegra_se_cmdbuf, bo);
+ struct host1x_bo_mapping *map;
+ int err;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&map->ref);
+ map->bo = host1x_bo_get(bo);
+ map->direction = direction;
+ map->dev = dev;
+
+ map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+ if (!map->sgt) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ err = dma_get_sgtable(dev, map->sgt, cmdbuf->addr,
+ cmdbuf->iova, cmdbuf->words * 4);
+ if (err)
+ goto free_sgt;
+
+ err = dma_map_sgtable(dev, map->sgt, direction, 0);
+ if (err)
+ goto free_sgt;
+
+ map->phys = sg_dma_address(map->sgt->sgl);
+ map->size = cmdbuf->words * 4;
+ map->chunks = err;
+
+ return map;
+
+free_sgt:
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+free:
+ kfree(map);
+ return ERR_PTR(err);
+}
+
+static void tegra_se_cmdbuf_unpin(struct host1x_bo_mapping *map)
+{
+ if (!map)
+ return;
+
+ dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+ host1x_bo_put(map->bo);
+
+ kfree(map);
+}
+
+static void *tegra_se_cmdbuf_mmap(struct host1x_bo *host_bo)
+{
+ struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
+
+ return cmdbuf->addr;
+}
+
+static void tegra_se_cmdbuf_munmap(struct host1x_bo *host_bo, void *addr)
+{
+}
+
+static const struct host1x_bo_ops tegra_se_cmdbuf_ops = {
+ .get = tegra_se_cmdbuf_get,
+ .put = tegra_se_cmdbuf_put,
+ .pin = tegra_se_cmdbuf_pin,
+ .unpin = tegra_se_cmdbuf_unpin,
+ .mmap = tegra_se_cmdbuf_mmap,
+ .munmap = tegra_se_cmdbuf_munmap,
+};
+
+static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssize_t size)
+{
+ struct tegra_se_cmdbuf *cmdbuf;
+ struct device *dev = se->dev->parent;
+
+ cmdbuf = kzalloc(sizeof(*cmdbuf), GFP_KERNEL);
+ if (!cmdbuf)
+ return NULL;
+
+ cmdbuf->addr = dma_alloc_attrs(dev, size, &cmdbuf->iova,
+ GFP_KERNEL, 0);
+ if (!cmdbuf->addr) {
+ kfree(cmdbuf);
+ return NULL;
+ }
+
+ cmdbuf->size = size;
+ cmdbuf->dev = dev;
+
+ host1x_bo_init(&cmdbuf->bo, &tegra_se_cmdbuf_ops);
+ kref_init(&cmdbuf->ref);
+
+ return cmdbuf;
+}
+
+int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+{
+ struct host1x_job *job;
+ int ret;
+
+ job = host1x_job_alloc(se->channel, 1, 0, true);
+ if (!job) {
+ dev_err(se->dev, "failed to allocate host1x job\n");
+ return -ENOMEM;
+ }
+
+ job->syncpt = host1x_syncpt_get(se->syncpt);
+ job->syncpt_incrs = 1;
+ job->client = &se->client;
+ job->class = se->client.class;
+ job->serialize = true;
+ job->engine_fallback_streamid = se->stream_id;
+ job->engine_streamid_offset = SE_STREAM_ID;
+
+ se->cmdbuf->words = size;
+
+ host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+
+ ret = host1x_job_pin(job, se->dev);
+ if (ret) {
+ dev_err(se->dev, "failed to pin host1x job\n");
+ goto job_put;
+ }
+
+ ret = host1x_job_submit(job);
+ if (ret) {
+ dev_err(se->dev, "failed to submit host1x job\n");
+ goto job_unpin;
+ }
+
+ ret = host1x_syncpt_wait(job->syncpt, job->syncpt_end,
+ MAX_SCHEDULE_TIMEOUT, NULL);
+ if (ret) {
+ dev_err(se->dev, "host1x job timed out\n");
+ goto job_put;
+ }
+
+ host1x_job_put(job);
+ return 0;
+
+job_unpin:
+ host1x_job_unpin(job);
+job_put:
+ host1x_job_put(job);
+
+ return ret;
+}
+
+static int tegra_se_client_init(struct host1x_client *client)
+{
+ struct tegra_se *se = container_of(client, struct tegra_se, client);
+ int ret;
+
+ se->channel = host1x_channel_request(&se->client);
+ if (!se->channel) {
+ dev_err(se->dev, "host1x channel map failed\n");
+ return -ENODEV;
+ }
+
+ se->syncpt = host1x_syncpt_request(&se->client, 0);
+ if (!se->syncpt) {
+ dev_err(se->dev, "host1x syncpt allocation failed\n");
+ ret = -EINVAL;
+ goto channel_put;
+ }
+
+ se->syncpt_id = host1x_syncpt_id(se->syncpt);
+
+ se->cmdbuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+ if (!se->cmdbuf) {
+ ret = -ENOMEM;
+ goto syncpt_put;
+ }
+
+ ret = se->hw->init_alg(se);
+ if (ret) {
+ dev_err(se->dev, "failed to register algorithms\n");
+ goto cmdbuf_put;
+ }
+
+ return 0;
+
+cmdbuf_put:
+ tegra_se_cmdbuf_put(&se->cmdbuf->bo);
+syncpt_put:
+ host1x_syncpt_put(se->syncpt);
+channel_put:
+ host1x_channel_put(se->channel);
+
+ return ret;
+}
+
+static int tegra_se_client_deinit(struct host1x_client *client)
+{
+ struct tegra_se *se = container_of(client, struct tegra_se, client);
+
+ se->hw->deinit_alg(se);
+ tegra_se_cmdbuf_put(&se->cmdbuf->bo);
+ host1x_syncpt_put(se->syncpt);
+ host1x_channel_put(se->channel);
+
+ return 0;
+}
+
+static const struct host1x_client_ops tegra_se_client_ops = {
+ .init = tegra_se_client_init,
+ .exit = tegra_se_client_deinit,
+};
+
+static int tegra_se_host1x_register(struct tegra_se *se)
+{
+ INIT_LIST_HEAD(&se->client.list);
+ se->client.dev = se->dev;
+ se->client.ops = &tegra_se_client_ops;
+ se->client.class = se->hw->host1x_class;
+ se->client.num_syncpts = 1;
+
+ host1x_client_register(&se->client);
+
+ return 0;
+}
+
+static int tegra_se_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_se *se;
+ int ret;
+
+ se = devm_kzalloc(dev, sizeof(*se), GFP_KERNEL);
+ if (!se)
+ return -ENOMEM;
+
+ se->dev = dev;
+ se->owner = TEGRA_GPSE_ID;
+ se->hw = device_get_match_data(&pdev->dev);
+
+ se->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(se->base))
+ return PTR_ERR(se->base);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set DMA mask\n");
+
+ platform_set_drvdata(pdev, se);
+
+ se->clk = devm_clk_get_enabled(se->dev, NULL);
+ if (IS_ERR(se->clk))
+ return dev_err_probe(dev, PTR_ERR(se->clk),
+ "failed to enable clocks\n");
+
+ if (!tegra_dev_iommu_get_stream_id(dev, &se->stream_id))
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get IOMMU stream ID\n");
+
+ writel(se->stream_id, se->base + SE_STREAM_ID);
+
+ se->engine = crypto_engine_alloc_init(dev, 0);
+ if (!se->engine)
+ return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n");
+
+ ret = crypto_engine_start(se->engine);
+ if (ret) {
+ crypto_engine_exit(se->engine);
+ return dev_err_probe(dev, ret, "failed to start crypto engine\n");
+ }
+
+ ret = tegra_se_host1x_register(se);
+ if (ret) {
+ crypto_engine_stop(se->engine);
+ crypto_engine_exit(se->engine);
+ return dev_err_probe(dev, ret, "failed to init host1x params\n");
+ }
+
+ return 0;
+}
+
+static void tegra_se_remove(struct platform_device *pdev)
+{
+ struct tegra_se *se = platform_get_drvdata(pdev);
+
+ crypto_engine_stop(se->engine);
+ crypto_engine_exit(se->engine);
+ iommu_fwspec_free(se->dev);
+ host1x_client_unregister(&se->client);
+}
+
+static const struct tegra_se_regs tegra234_aes1_regs = {
+ .config = SE_AES1_CFG,
+ .op = SE_AES1_OPERATION,
+ .last_blk = SE_AES1_LAST_BLOCK,
+ .linear_ctr = SE_AES1_LINEAR_CTR,
+ .aad_len = SE_AES1_AAD_LEN,
+ .cryp_msg_len = SE_AES1_CRYPTO_MSG_LEN,
+ .manifest = SE_AES1_KEYMANIFEST,
+ .key_addr = SE_AES1_KEY_ADDR,
+ .key_data = SE_AES1_KEY_DATA,
+ .key_dst = SE_AES1_KEY_DST,
+ .result = SE_AES1_CMAC_RESULT,
+};
+
+static const struct tegra_se_regs tegra234_hash_regs = {
+ .config = SE_SHA_CFG,
+ .op = SE_SHA_OPERATION,
+ .manifest = SE_SHA_KEYMANIFEST,
+ .key_addr = SE_SHA_KEY_ADDR,
+ .key_data = SE_SHA_KEY_DATA,
+ .key_dst = SE_SHA_KEY_DST,
+ .result = SE_SHA_HASH_RESULT,
+};
+
+static const struct tegra_se_hw tegra234_aes_hw = {
+ .regs = &tegra234_aes1_regs,
+ .kac_ver = 1,
+ .host1x_class = 0x3b,
+ .init_alg = tegra_init_aes,
+ .deinit_alg = tegra_deinit_aes,
+};
+
+static const struct tegra_se_hw tegra234_hash_hw = {
+ .regs = &tegra234_hash_regs,
+ .kac_ver = 1,
+ .host1x_class = 0x3d,
+ .init_alg = tegra_init_hash,
+ .deinit_alg = tegra_deinit_hash,
+};
+
+static const struct of_device_id tegra_se_of_match[] = {
+ {
+ .compatible = "nvidia,tegra234-se-aes",
+ .data = &tegra234_aes_hw
+ }, {
+ .compatible = "nvidia,tegra234-se-hash",
+ .data = &tegra234_hash_hw,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_se_of_match);
+
+static struct platform_driver tegra_se_driver = {
+ .driver = {
+ .name = "tegra-se",
+ .of_match_table = tegra_se_of_match,
+ },
+ .probe = tegra_se_probe,
+ .remove_new = tegra_se_remove,
+};
+
+static int tegra_se_host1x_probe(struct host1x_device *dev)
+{
+ return host1x_device_init(dev);
+}
+
+static int tegra_se_host1x_remove(struct host1x_device *dev)
+{
+ host1x_device_exit(dev);
+
+ return 0;
+}
+
+static struct host1x_driver tegra_se_host1x_driver = {
+ .driver = {
+ .name = "tegra-se-host1x",
+ },
+ .probe = tegra_se_host1x_probe,
+ .remove = tegra_se_host1x_remove,
+ .subdevs = tegra_se_of_match,
+};
+
+static int __init tegra_se_module_init(void)
+{
+ int ret;
+
+ ret = host1x_driver_register(&tegra_se_host1x_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&tegra_se_driver);
+}
+
+static void __exit tegra_se_module_exit(void)
+{
+ host1x_driver_unregister(&tegra_se_host1x_driver);
+ platform_driver_unregister(&tegra_se_driver);
+}
+
+module_init(tegra_se_module_init);
+module_exit(tegra_se_module_exit);
+
+MODULE_DESCRIPTION("NVIDIA Tegra Security Engine Driver");
+MODULE_AUTHOR("Akhil R <akhilrajeev@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h
new file mode 100644
index 000000000000..b9dd7ceb8783
--- /dev/null
+++ b/drivers/crypto/tegra/tegra-se.h
@@ -0,0 +1,560 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * Header file for NVIDIA Security Engine driver.
+ */
+
+#ifndef _TEGRA_SE_H
+#define _TEGRA_SE_H
+
+#include <linux/bitfield.h>
+#include <linux/iommu.h>
+#include <linux/host1x.h>
+#include <crypto/aead.h>
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/sha1.h>
+#include <crypto/sha3.h>
+#include <crypto/skcipher.h>
+
+#define SE_OWNERSHIP 0x14
+#define SE_OWNERSHIP_UID(x) FIELD_GET(GENMASK(7, 0), x)
+#define TEGRA_GPSE_ID 3
+
+#define SE_STREAM_ID 0x90
+
+#define SE_SHA_CFG 0x4004
+#define SE_SHA_KEY_ADDR 0x4094
+#define SE_SHA_KEY_DATA 0x4098
+#define SE_SHA_KEYMANIFEST 0x409c
+#define SE_SHA_CRYPTO_CFG 0x40a4
+#define SE_SHA_KEY_DST 0x40a8
+#define SE_SHA_SRC_KSLT 0x4180
+#define SE_SHA_TGT_KSLT 0x4184
+#define SE_SHA_MSG_LENGTH 0x401c
+#define SE_SHA_OPERATION 0x407c
+#define SE_SHA_HASH_RESULT 0x40b0
+
+#define SE_SHA_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x)
+#define SE_SHA_ENC_MODE_SHA1 SE_SHA_ENC_MODE(0)
+#define SE_SHA_ENC_MODE_SHA224 SE_SHA_ENC_MODE(4)
+#define SE_SHA_ENC_MODE_SHA256 SE_SHA_ENC_MODE(5)
+#define SE_SHA_ENC_MODE_SHA384 SE_SHA_ENC_MODE(6)
+#define SE_SHA_ENC_MODE_SHA512 SE_SHA_ENC_MODE(7)
+#define SE_SHA_ENC_MODE_SHA_CTX_INTEGRITY SE_SHA_ENC_MODE(8)
+#define SE_SHA_ENC_MODE_SHA3_224 SE_SHA_ENC_MODE(9)
+#define SE_SHA_ENC_MODE_SHA3_256 SE_SHA_ENC_MODE(10)
+#define SE_SHA_ENC_MODE_SHA3_384 SE_SHA_ENC_MODE(11)
+#define SE_SHA_ENC_MODE_SHA3_512 SE_SHA_ENC_MODE(12)
+#define SE_SHA_ENC_MODE_SHAKE128 SE_SHA_ENC_MODE(13)
+#define SE_SHA_ENC_MODE_SHAKE256 SE_SHA_ENC_MODE(14)
+#define SE_SHA_ENC_MODE_HMAC_SHA256_1KEY SE_SHA_ENC_MODE(0)
+#define SE_SHA_ENC_MODE_HMAC_SHA256_2KEY SE_SHA_ENC_MODE(1)
+#define SE_SHA_ENC_MODE_SM3_256 SE_SHA_ENC_MODE(0)
+
+#define SE_SHA_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x)
+#define SE_SHA_ENC_ALG_NOP SE_SHA_CFG_ENC_ALG(0)
+#define SE_SHA_ENC_ALG_SHA_ENC SE_SHA_CFG_ENC_ALG(1)
+#define SE_SHA_ENC_ALG_RNG SE_SHA_CFG_ENC_ALG(2)
+#define SE_SHA_ENC_ALG_SHA SE_SHA_CFG_ENC_ALG(3)
+#define SE_SHA_ENC_ALG_SM3 SE_SHA_CFG_ENC_ALG(4)
+#define SE_SHA_ENC_ALG_HMAC SE_SHA_CFG_ENC_ALG(7)
+#define SE_SHA_ENC_ALG_KDF SE_SHA_CFG_ENC_ALG(8)
+#define SE_SHA_ENC_ALG_KEY_INVLD SE_SHA_CFG_ENC_ALG(10)
+#define SE_SHA_ENC_ALG_KEY_INQUIRE SE_SHA_CFG_ENC_ALG(12)
+#define SE_SHA_ENC_ALG_INS SE_SHA_CFG_ENC_ALG(13)
+
+#define SE_SHA_OP_LASTBUF FIELD_PREP(BIT(16), 1)
+#define SE_SHA_OP_WRSTALL FIELD_PREP(BIT(15), 1)
+
+#define SE_SHA_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x)
+#define SE_SHA_OP_START SE_SHA_OP_OP(1)
+#define SE_SHA_OP_RESTART_OUT SE_SHA_OP_OP(2)
+#define SE_SHA_OP_RESTART_IN SE_SHA_OP_OP(4)
+#define SE_SHA_OP_RESTART_INOUT SE_SHA_OP_OP(5)
+#define SE_SHA_OP_DUMMY SE_SHA_OP_OP(6)
+
+#define SE_SHA_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x)
+#define SE_SHA_DEC_ALG_NOP SE_SHA_CFG_DEC_ALG(0)
+#define SE_SHA_DEC_ALG_AES_DEC SE_SHA_CFG_DEC_ALG(1)
+#define SE_SHA_DEC_ALG_HMAC SE_SHA_CFG_DEC_ALG(7)
+#define SE_SHA_DEC_ALG_HMAC_VERIFY SE_SHA_CFG_DEC_ALG(9)
+
+#define SE_SHA_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x)
+#define SE_SHA_DST_MEMORY SE_SHA_CFG_DST(0)
+#define SE_SHA_DST_HASH_REG SE_SHA_CFG_DST(1)
+#define SE_SHA_DST_KEYTABLE SE_SHA_CFG_DST(2)
+#define SE_SHA_DST_SRK SE_SHA_CFG_DST(3)
+
+#define SE_SHA_TASK_HASH_INIT BIT(0)
+
+/* AES Configuration */
+#define SE_AES0_CFG 0x1004
+#define SE_AES0_CRYPTO_CONFIG 0x1008
+#define SE_AES0_KEY_DST 0x1030
+#define SE_AES0_OPERATION 0x1038
+#define SE_AES0_LINEAR_CTR 0x101c
+#define SE_AES0_LAST_BLOCK 0x102c
+#define SE_AES0_KEY_ADDR 0x10bc
+#define SE_AES0_KEY_DATA 0x10c0
+#define SE_AES0_CMAC_RESULT 0x10c4
+#define SE_AES0_SRC_KSLT 0x1100
+#define SE_AES0_TGT_KSLT 0x1104
+#define SE_AES0_KEYMANIFEST 0x1114
+#define SE_AES0_AAD_LEN 0x112c
+#define SE_AES0_CRYPTO_MSG_LEN 0x1134
+
+#define SE_AES1_CFG 0x2004
+#define SE_AES1_CRYPTO_CONFIG 0x2008
+#define SE_AES1_KEY_DST 0x2030
+#define SE_AES1_OPERATION 0x2038
+#define SE_AES1_LINEAR_CTR 0x201c
+#define SE_AES1_LAST_BLOCK 0x202c
+#define SE_AES1_KEY_ADDR 0x20bc
+#define SE_AES1_KEY_DATA 0x20c0
+#define SE_AES1_CMAC_RESULT 0x20c4
+#define SE_AES1_SRC_KSLT 0x2100
+#define SE_AES1_TGT_KSLT 0x2104
+#define SE_AES1_KEYMANIFEST 0x2114
+#define SE_AES1_AAD_LEN 0x212c
+#define SE_AES1_CRYPTO_MSG_LEN 0x2134
+
+#define SE_AES_CFG_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x)
+#define SE_AES_ENC_MODE_GMAC SE_AES_CFG_ENC_MODE(3)
+#define SE_AES_ENC_MODE_GCM SE_AES_CFG_ENC_MODE(4)
+#define SE_AES_ENC_MODE_GCM_FINAL SE_AES_CFG_ENC_MODE(5)
+#define SE_AES_ENC_MODE_CMAC SE_AES_CFG_ENC_MODE(7)
+#define SE_AES_ENC_MODE_CBC_MAC SE_AES_CFG_ENC_MODE(12)
+
+#define SE_AES_CFG_DEC_MODE(x) FIELD_PREP(GENMASK(23, 16), x)
+#define SE_AES_DEC_MODE_GMAC SE_AES_CFG_DEC_MODE(3)
+#define SE_AES_DEC_MODE_GCM SE_AES_CFG_DEC_MODE(4)
+#define SE_AES_DEC_MODE_GCM_FINAL SE_AES_CFG_DEC_MODE(5)
+#define SE_AES_DEC_MODE_CBC_MAC SE_AES_CFG_DEC_MODE(12)
+
+#define SE_AES_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x)
+#define SE_AES_ENC_ALG_NOP SE_AES_CFG_ENC_ALG(0)
+#define SE_AES_ENC_ALG_AES_ENC SE_AES_CFG_ENC_ALG(1)
+#define SE_AES_ENC_ALG_RNG SE_AES_CFG_ENC_ALG(2)
+#define SE_AES_ENC_ALG_SHA SE_AES_CFG_ENC_ALG(3)
+#define SE_AES_ENC_ALG_HMAC SE_AES_CFG_ENC_ALG(7)
+#define SE_AES_ENC_ALG_KDF SE_AES_CFG_ENC_ALG(8)
+#define SE_AES_ENC_ALG_INS SE_AES_CFG_ENC_ALG(13)
+
+#define SE_AES_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x)
+#define SE_AES_DEC_ALG_NOP SE_AES_CFG_DEC_ALG(0)
+#define SE_AES_DEC_ALG_AES_DEC SE_AES_CFG_DEC_ALG(1)
+
+#define SE_AES_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x)
+#define SE_AES_DST_MEMORY SE_AES_CFG_DST(0)
+#define SE_AES_DST_HASH_REG SE_AES_CFG_DST(1)
+#define SE_AES_DST_KEYTABLE SE_AES_CFG_DST(2)
+#define SE_AES_DST_SRK SE_AES_CFG_DST(3)
+
+/* AES Crypto Configuration */
+#define SE_AES_KEY2_INDEX(x) FIELD_PREP(GENMASK(31, 28), x)
+#define SE_AES_KEY_INDEX(x) FIELD_PREP(GENMASK(27, 24), x)
+
+#define SE_AES_CRYPTO_CFG_SCC_DIS FIELD_PREP(BIT(20), 1)
+
+#define SE_AES_CRYPTO_CFG_CTR_CNTN(x) FIELD_PREP(GENMASK(18, 11), x)
+
+#define SE_AES_CRYPTO_CFG_IV_MODE(x) FIELD_PREP(BIT(10), x)
+#define SE_AES_IV_MODE_SWIV SE_AES_CRYPTO_CFG_IV_MODE(0)
+#define SE_AES_IV_MODE_HWIV SE_AES_CRYPTO_CFG_IV_MODE(1)
+
+#define SE_AES_CRYPTO_CFG_CORE_SEL(x) FIELD_PREP(BIT(9), x)
+#define SE_AES_CORE_SEL_DECRYPT SE_AES_CRYPTO_CFG_CORE_SEL(0)
+#define SE_AES_CORE_SEL_ENCRYPT SE_AES_CRYPTO_CFG_CORE_SEL(1)
+
+#define SE_AES_CRYPTO_CFG_IV_SEL(x) FIELD_PREP(GENMASK(8, 7), x)
+#define SE_AES_IV_SEL_UPDATED SE_AES_CRYPTO_CFG_IV_SEL(1)
+#define SE_AES_IV_SEL_REG SE_AES_CRYPTO_CFG_IV_SEL(2)
+#define SE_AES_IV_SEL_RANDOM SE_AES_CRYPTO_CFG_IV_SEL(3)
+
+#define SE_AES_CRYPTO_CFG_VCTRAM_SEL(x) FIELD_PREP(GENMASK(6, 5), x)
+#define SE_AES_VCTRAM_SEL_MEMORY SE_AES_CRYPTO_CFG_VCTRAM_SEL(0)
+#define SE_AES_VCTRAM_SEL_TWEAK SE_AES_CRYPTO_CFG_VCTRAM_SEL(1)
+#define SE_AES_VCTRAM_SEL_AESOUT SE_AES_CRYPTO_CFG_VCTRAM_SEL(2)
+#define SE_AES_VCTRAM_SEL_PREV_MEM SE_AES_CRYPTO_CFG_VCTRAM_SEL(3)
+
+#define SE_AES_CRYPTO_CFG_INPUT_SEL(x) FIELD_PREP(GENMASK(4, 3), x)
+#define SE_AES_INPUT_SEL_MEMORY SE_AES_CRYPTO_CFG_INPUT_SEL(0)
+#define SE_AES_INPUT_SEL_RANDOM SE_AES_CRYPTO_CFG_INPUT_SEL(1)
+#define SE_AES_INPUT_SEL_AESOUT SE_AES_CRYPTO_CFG_INPUT_SEL(2)
+#define SE_AES_INPUT_SEL_LINEAR_CTR SE_AES_CRYPTO_CFG_INPUT_SEL(3)
+#define SE_AES_INPUT_SEL_REG SE_AES_CRYPTO_CFG_INPUT_SEL(1)
+
+#define SE_AES_CRYPTO_CFG_XOR_POS(x) FIELD_PREP(GENMASK(2, 1), x)
+#define SE_AES_XOR_POS_BYPASS SE_AES_CRYPTO_CFG_XOR_POS(0)
+#define SE_AES_XOR_POS_BOTH SE_AES_CRYPTO_CFG_XOR_POS(1)
+#define SE_AES_XOR_POS_TOP SE_AES_CRYPTO_CFG_XOR_POS(2)
+#define SE_AES_XOR_POS_BOTTOM SE_AES_CRYPTO_CFG_XOR_POS(3)
+
+#define SE_AES_CRYPTO_CFG_HASH_EN(x) FIELD_PREP(BIT(0), x)
+#define SE_AES_HASH_DISABLE SE_AES_CRYPTO_CFG_HASH_EN(0)
+#define SE_AES_HASH_ENABLE SE_AES_CRYPTO_CFG_HASH_EN(1)
+
+#define SE_LAST_BLOCK_VAL(x) FIELD_PREP(GENMASK(19, 0), x)
+#define SE_LAST_BLOCK_RES_BITS(x) FIELD_PREP(GENMASK(26, 20), x)
+
+#define SE_AES_OP_LASTBUF FIELD_PREP(BIT(16), 1)
+#define SE_AES_OP_WRSTALL FIELD_PREP(BIT(15), 1)
+#define SE_AES_OP_FINAL FIELD_PREP(BIT(5), 1)
+#define SE_AES_OP_INIT FIELD_PREP(BIT(4), 1)
+
+#define SE_AES_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x)
+#define SE_AES_OP_START SE_AES_OP_OP(1)
+#define SE_AES_OP_RESTART_OUT SE_AES_OP_OP(2)
+#define SE_AES_OP_RESTART_IN SE_AES_OP_OP(4)
+#define SE_AES_OP_RESTART_INOUT SE_AES_OP_OP(5)
+#define SE_AES_OP_DUMMY SE_AES_OP_OP(6)
+
+#define SE_KAC_SIZE(x) FIELD_PREP(GENMASK(15, 14), x)
+#define SE_KAC_SIZE_128 SE_KAC_SIZE(0)
+#define SE_KAC_SIZE_192 SE_KAC_SIZE(1)
+#define SE_KAC_SIZE_256 SE_KAC_SIZE(2)
+
+#define SE_KAC_EXPORTABLE FIELD_PREP(BIT(12), 1)
+
+#define SE_KAC_PURPOSE(x) FIELD_PREP(GENMASK(11, 8), x)
+#define SE_KAC_ENC SE_KAC_PURPOSE(0)
+#define SE_KAC_CMAC SE_KAC_PURPOSE(1)
+#define SE_KAC_HMAC SE_KAC_PURPOSE(2)
+#define SE_KAC_GCM_KW SE_KAC_PURPOSE(3)
+#define SE_KAC_HMAC_KDK SE_KAC_PURPOSE(6)
+#define SE_KAC_HMAC_KDD SE_KAC_PURPOSE(7)
+#define SE_KAC_HMAC_KDD_KUW SE_KAC_PURPOSE(8)
+#define SE_KAC_XTS SE_KAC_PURPOSE(9)
+#define SE_KAC_GCM SE_KAC_PURPOSE(10)
+
+#define SE_KAC_USER_NS FIELD_PREP(GENMASK(6, 4), 3)
+
+#define SE_AES_KEY_DST_INDEX(x) FIELD_PREP(GENMASK(11, 8), x)
+#define SE_ADDR_HI_MSB(x) FIELD_PREP(GENMASK(31, 24), x)
+#define SE_ADDR_HI_SZ(x) FIELD_PREP(GENMASK(23, 0), x)
+
+#define SE_CFG_AES_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_DEC_ALG_NOP | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_AES_DECRYPT (SE_AES_ENC_ALG_NOP | \
+ SE_AES_DEC_ALG_AES_DEC | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GMAC_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_DEC_ALG_NOP | \
+ SE_AES_ENC_MODE_GMAC | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GMAC_DECRYPT (SE_AES_ENC_ALG_NOP | \
+ SE_AES_DEC_ALG_AES_DEC | \
+ SE_AES_DEC_MODE_GMAC | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GCM_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_DEC_ALG_NOP | \
+ SE_AES_ENC_MODE_GCM | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GCM_DECRYPT (SE_AES_ENC_ALG_NOP | \
+ SE_AES_DEC_ALG_AES_DEC | \
+ SE_AES_DEC_MODE_GCM | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GCM_FINAL_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_DEC_ALG_NOP | \
+ SE_AES_ENC_MODE_GCM_FINAL | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_GCM_FINAL_DECRYPT (SE_AES_ENC_ALG_NOP | \
+ SE_AES_DEC_ALG_AES_DEC | \
+ SE_AES_DEC_MODE_GCM_FINAL | \
+ SE_AES_DST_MEMORY)
+
+#define SE_CFG_CMAC (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_ENC_MODE_CMAC | \
+ SE_AES_DST_HASH_REG)
+
+#define SE_CFG_CBC_MAC (SE_AES_ENC_ALG_AES_ENC | \
+ SE_AES_ENC_MODE_CBC_MAC)
+
+#define SE_CFG_INS (SE_AES_ENC_ALG_INS | \
+ SE_AES_DEC_ALG_NOP)
+
+#define SE_CRYPTO_CFG_ECB_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_XOR_POS_BYPASS | \
+ SE_AES_CORE_SEL_ENCRYPT)
+
+#define SE_CRYPTO_CFG_ECB_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_XOR_POS_BYPASS | \
+ SE_AES_CORE_SEL_DECRYPT)
+
+#define SE_CRYPTO_CFG_CBC_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_AESOUT | \
+ SE_AES_XOR_POS_TOP | \
+ SE_AES_CORE_SEL_ENCRYPT | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_CBC_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_PREV_MEM | \
+ SE_AES_XOR_POS_BOTTOM | \
+ SE_AES_CORE_SEL_DECRYPT | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_CTR (SE_AES_INPUT_SEL_LINEAR_CTR | \
+ SE_AES_VCTRAM_SEL_MEMORY | \
+ SE_AES_XOR_POS_BOTTOM | \
+ SE_AES_CORE_SEL_ENCRYPT | \
+ SE_AES_CRYPTO_CFG_CTR_CNTN(1) | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_XTS_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_TWEAK | \
+ SE_AES_XOR_POS_BOTH | \
+ SE_AES_CORE_SEL_ENCRYPT | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_XTS_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_TWEAK | \
+ SE_AES_XOR_POS_BOTH | \
+ SE_AES_CORE_SEL_DECRYPT | \
+ SE_AES_IV_SEL_REG)
+
+#define SE_CRYPTO_CFG_CBC_MAC (SE_AES_INPUT_SEL_MEMORY | \
+ SE_AES_VCTRAM_SEL_AESOUT | \
+ SE_AES_XOR_POS_TOP | \
+ SE_AES_CORE_SEL_ENCRYPT | \
+ SE_AES_HASH_ENABLE | \
+ SE_AES_IV_SEL_REG)
+
+#define HASH_RESULT_REG_COUNT 50
+#define CMAC_RESULT_REG_COUNT 4
+
+#define SE_CRYPTO_CTR_REG_COUNT 4
+#define SE_MAX_KEYSLOT 15
+#define SE_MAX_MEM_ALLOC SZ_4M
+#define SE_AES_BUFLEN 0x8000
+#define SE_SHA_BUFLEN 0x2000
+
+#define SHA_FIRST BIT(0)
+#define SHA_UPDATE BIT(1)
+#define SHA_FINAL BIT(2)
+
+/* Security Engine operation modes */
+enum se_aes_alg {
+ SE_ALG_CBC, /* Cipher Block Chaining (CBC) mode */
+ SE_ALG_ECB, /* Electronic Codebook (ECB) mode */
+ SE_ALG_CTR, /* Counter (CTR) mode */
+ SE_ALG_XTS, /* XTS mode */
+ SE_ALG_GMAC, /* GMAC mode */
+ SE_ALG_GCM, /* GCM mode */
+ SE_ALG_GCM_FINAL, /* GCM FINAL mode */
+ SE_ALG_CMAC, /* Cipher-based MAC (CMAC) mode */
+ SE_ALG_CBC_MAC, /* CBC MAC mode */
+};
+
+enum se_hash_alg {
+ SE_ALG_RNG_DRBG, /* Deterministic Random Bit Generator */
+ SE_ALG_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */
+ SE_ALG_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */
+ SE_ALG_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */
+ SE_ALG_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */
+ SE_ALG_SHA512, /* Secure Hash Algorithm-512 (SHA512) mode */
+ SE_ALG_SHA3_224, /* Secure Hash Algorithm3-224 (SHA3-224) mode */
+ SE_ALG_SHA3_256, /* Secure Hash Algorithm3-256 (SHA3-256) mode */
+ SE_ALG_SHA3_384, /* Secure Hash Algorithm3-384 (SHA3-384) mode */
+ SE_ALG_SHA3_512, /* Secure Hash Algorithm3-512 (SHA3-512) mode */
+ SE_ALG_SHAKE128, /* Secure Hash Algorithm3 (SHAKE128) mode */
+ SE_ALG_SHAKE256, /* Secure Hash Algorithm3 (SHAKE256) mode */
+ SE_ALG_HMAC_SHA224, /* Hash based MAC (HMAC) - 224 */
+ SE_ALG_HMAC_SHA256, /* Hash based MAC (HMAC) - 256 */
+ SE_ALG_HMAC_SHA384, /* Hash based MAC (HMAC) - 384 */
+ SE_ALG_HMAC_SHA512, /* Hash based MAC (HMAC) - 512 */
+};
+
+struct tegra_se_alg {
+ struct tegra_se *se_dev;
+ const char *alg_base;
+
+ union {
+ struct skcipher_engine_alg skcipher;
+ struct aead_engine_alg aead;
+ struct ahash_engine_alg ahash;
+ } alg;
+};
+
+struct tegra_se_regs {
+ u32 op;
+ u32 config;
+ u32 last_blk;
+ u32 linear_ctr;
+ u32 out_addr;
+ u32 aad_len;
+ u32 cryp_msg_len;
+ u32 manifest;
+ u32 key_addr;
+ u32 key_data;
+ u32 key_dst;
+ u32 result;
+};
+
+struct tegra_se_hw {
+ const struct tegra_se_regs *regs;
+ int (*init_alg)(struct tegra_se *se);
+ void (*deinit_alg)(struct tegra_se *se);
+ bool support_sm_alg;
+ u32 host1x_class;
+ u32 kac_ver;
+};
+
+struct tegra_se {
+ int (*manifest)(u32 user, u32 alg, u32 keylen);
+ const struct tegra_se_hw *hw;
+ struct host1x_client client;
+ struct host1x_channel *channel;
+ struct tegra_se_cmdbuf *cmdbuf;
+ struct crypto_engine *engine;
+ struct host1x_syncpt *syncpt;
+ struct device *dev;
+ struct clk *clk;
+ unsigned int opcode_addr;
+ unsigned int stream_id;
+ unsigned int syncpt_id;
+ void __iomem *base;
+ u32 owner;
+};
+
+struct tegra_se_cmdbuf {
+ dma_addr_t iova;
+ u32 *addr;
+ struct device *dev;
+ struct kref ref;
+ struct host1x_bo bo;
+ ssize_t size;
+ u32 words;
+};
+
+struct tegra_se_datbuf {
+ u8 *buf;
+ dma_addr_t addr;
+ ssize_t size;
+};
+
+static inline int se_algname_to_algid(const char *name)
+{
+ if (!strcmp(name, "cbc(aes)"))
+ return SE_ALG_CBC;
+ else if (!strcmp(name, "ecb(aes)"))
+ return SE_ALG_ECB;
+ else if (!strcmp(name, "ctr(aes)"))
+ return SE_ALG_CTR;
+ else if (!strcmp(name, "xts(aes)"))
+ return SE_ALG_XTS;
+ else if (!strcmp(name, "cmac(aes)"))
+ return SE_ALG_CMAC;
+ else if (!strcmp(name, "gcm(aes)"))
+ return SE_ALG_GCM;
+ else if (!strcmp(name, "ccm(aes)"))
+ return SE_ALG_CBC_MAC;
+
+ else if (!strcmp(name, "sha1"))
+ return SE_ALG_SHA1;
+ else if (!strcmp(name, "sha224"))
+ return SE_ALG_SHA224;
+ else if (!strcmp(name, "sha256"))
+ return SE_ALG_SHA256;
+ else if (!strcmp(name, "sha384"))
+ return SE_ALG_SHA384;
+ else if (!strcmp(name, "sha512"))
+ return SE_ALG_SHA512;
+ else if (!strcmp(name, "sha3-224"))
+ return SE_ALG_SHA3_224;
+ else if (!strcmp(name, "sha3-256"))
+ return SE_ALG_SHA3_256;
+ else if (!strcmp(name, "sha3-384"))
+ return SE_ALG_SHA3_384;
+ else if (!strcmp(name, "sha3-512"))
+ return SE_ALG_SHA3_512;
+ else if (!strcmp(name, "hmac(sha224)"))
+ return SE_ALG_HMAC_SHA224;
+ else if (!strcmp(name, "hmac(sha256)"))
+ return SE_ALG_HMAC_SHA256;
+ else if (!strcmp(name, "hmac(sha384)"))
+ return SE_ALG_HMAC_SHA384;
+ else if (!strcmp(name, "hmac(sha512)"))
+ return SE_ALG_HMAC_SHA512;
+ else
+ return -EINVAL;
+}
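
In practice the callers are expected to resolve the id from the tfm's algorithm name at init time; a minimal sketch (helper name is illustrative):

    /* Sketch: map a skcipher tfm's algorithm name to an SE_ALG_* id. */
    static int tegra_alg_id_sketch(struct crypto_skcipher *tfm)
    {
    	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));

    	return se_algname_to_algid(name);	/* -EINVAL if unsupported */
    }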
+
+/* Functions */
+int tegra_init_aes(struct tegra_se *se);
+int tegra_init_hash(struct tegra_se *se);
+void tegra_deinit_aes(struct tegra_se *se);
+void tegra_deinit_hash(struct tegra_se *se);
+int tegra_key_submit(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid);
+void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
+int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+
+/* HOST1x OPCODES */
+static inline u32 host1x_opcode_setpayload(unsigned int payload)
+{
+ return (9 << 28) | payload;
+}
+
+static inline u32 host1x_opcode_incr_w(unsigned int offset)
+{
+ /* 22-bit offset supported */
+ return (10 << 28) | offset;
+}
+
+static inline u32 host1x_opcode_nonincr_w(unsigned int offset)
+{
+ /* 22-bit offset supported */
+ return (11 << 28) | offset;
+}
+
+static inline u32 host1x_opcode_incr(unsigned int offset, unsigned int count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned int offset, unsigned int count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 10;
+}
+
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0x3ff) << 0;
+}
+
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+
+#define se_host1x_opcode_incr_w(x) host1x_opcode_incr_w((x) / 4)
+#define se_host1x_opcode_nonincr_w(x) host1x_opcode_nonincr_w((x) / 4)
+#define se_host1x_opcode_incr(x, y) host1x_opcode_incr((x) / 4, y)
+#define se_host1x_opcode_nonincr(x, y) host1x_opcode_nonincr((x) / 4, y)
+
+#endif /* _TEGRA_SE_H */
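
To make the packed config words concrete, a worked expansion of two of the SE_CFG_* composites, hand-computed from the FIELD_PREP masks above:

    /*
     * SE_CFG_AES_ENCRYPT = SE_AES_CFG_ENC_ALG(1)  -> 1 << 12 = 0x1000
     *                    | SE_AES_CFG_DEC_ALG(0)  -> 0
     *                    | SE_AES_CFG_DST(0)      -> 0
     *                    = 0x00001000
     *
     * SE_CFG_AES_DECRYPT = SE_AES_CFG_ENC_ALG(0)  -> 0
     *                    | SE_AES_CFG_DEC_ALG(1)  -> 1 << 8 = 0x0100
     *                    | SE_AES_CFG_DST(0)      -> 0
     *                    = 0x00000100
     */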
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index af5cb818f84d..cb8c155a2c9b 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -525,22 +525,11 @@ static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
u32 uid;
- int rc;
if (kstrtou32(acpi_device_uid(hb), 0, &uid))
return -EINVAL;
- rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
- if (rc < 0)
- return rc;
-
- /* Adjust back to picoseconds from nanoseconds */
- for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
- dport->hb_coord[i].read_latency *= 1000;
- dport->hb_coord[i].write_latency *= 1000;
- }
-
- return 0;
+ return acpi_get_genport_coordinates(uid, dport->coord);
}
static int add_host_bridge_dport(struct device *match, void *arg)
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index eddbbe21450c..bb83867d9fec 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -14,12 +14,42 @@
struct dsmas_entry {
struct range dpa_range;
u8 handle;
- struct access_coordinate coord;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
int entries;
int qos_class;
};
+static u32 cdat_normalize(u16 entry, u64 base, u8 type)
+{
+ u32 value;
+
+ /*
+ * Check for invalid (zero or all-ones) entries and multiplication overflow
+ */
+ if (entry == 0xffff || !entry)
+ return 0;
+ else if (base > (UINT_MAX / (entry)))
+ return 0;
+
+ /*
+ * CDAT fields follow the format of HMAT fields. See Table 5, Device
+ * Scoped Latency and Bandwidth Information Structure, in the Coherent
+ * Device Attribute Table (CDAT) Specification v1.01.
+ */
+ value = entry * base;
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ value = DIV_ROUND_UP(value, 1000);
+ break;
+ default:
+ break;
+ }
+ return value;
+}
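
A worked example of the normalization, assuming (per the code above) latency entries are expressed in picoseconds and rounded up to nanoseconds:

    /*
     * entry_base_unit = 100, latency entry = 15:
     *   value = 15 * 100 = 1500 (ps)
     *   DIV_ROUND_UP(1500, 1000) = 2 (ns)
     * Bandwidth types take the default branch and stay at entry * base.
     */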
+
static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
@@ -58,8 +88,8 @@ static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
return 0;
}
-static void cxl_access_coordinate_set(struct access_coordinate *coord,
- int access, unsigned int val)
+static void __cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
{
switch (access) {
case ACPI_HMAT_ACCESS_LATENCY:
@@ -85,6 +115,13 @@ static void cxl_access_coordinate_set(struct access_coordinate *coord,
}
}
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ __cxl_access_coordinate_set(&coord[i], access, val);
+}
+
static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
@@ -97,7 +134,6 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
__le16 le_val;
u64 val;
u16 len;
- int rc;
len = le16_to_cpu((__force __le16)hdr->length);
if (len != size || (unsigned long)hdr + len > end) {
@@ -124,12 +160,10 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
le_base = (__force __le64)dslbis->entry_base_unit;
le_val = (__force __le16)dslbis->entry[0];
- rc = check_mul_overflow(le64_to_cpu(le_base),
- le16_to_cpu(le_val), &val);
- if (rc)
- pr_warn("DSLBIS value overflowed.\n");
+ val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+ dslbis->data_type);
- cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+ cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);
return 0;
}
@@ -163,25 +197,18 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port,
static int cxl_port_perf_data_calculate(struct cxl_port *port,
struct xarray *dsmas_xa)
{
- struct access_coordinate ep_c;
- struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
struct dsmas_entry *dent;
int valid_entries = 0;
unsigned long index;
int rc;
- rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
+ rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
if (rc) {
dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
return rc;
}
- rc = cxl_hb_get_perf_coordinates(port, coord);
- if (rc) {
- dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
- return rc;
- }
-
struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
if (!cxl_root)
@@ -193,18 +220,10 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
xa_for_each(dsmas_xa, index, dent) {
int qos_class;
- cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
- /*
- * Keeping the host bridge coordinates separate from the dsmas
- * coordinates in order to allow calculation of access class
- * 0 and 1 for region later.
- */
- cxl_coordinates_combine(&coord[ACCESS_COORDINATE_CPU],
- &coord[ACCESS_COORDINATE_CPU],
- &dent->coord);
+ cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
dent->entries = 1;
rc = cxl_root->ops->qos_class(cxl_root,
- &coord[ACCESS_COORDINATE_CPU],
+ &dent->coord[ACCESS_COORDINATE_CPU],
1, &qos_class);
if (rc != 1)
continue;
@@ -222,14 +241,17 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
struct cxl_dpa_perf *dpa_perf)
{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ dpa_perf->coord[i] = dent->coord[i];
dpa_perf->dpa_range = dent->dpa_range;
- dpa_perf->coord = dent->coord;
dpa_perf->qos_class = dent->qos_class;
dev_dbg(dev,
"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
dent->dpa_range.start, dpa_perf->qos_class,
- dent->coord.read_bandwidth, dent->coord.write_bandwidth,
- dent->coord.read_latency, dent->coord.write_latency);
+ dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
+ dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
+ dent->coord[ACCESS_COORDINATE_CPU].read_latency,
+ dent->coord[ACCESS_COORDINATE_CPU].write_latency);
}
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
@@ -461,17 +483,16 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
-
- if (check_mul_overflow(le64_to_cpu(le_base),
- le16_to_cpu(le_val), &val))
- dev_warn(dev, "SSLBIS value overflowed!\n");
+ val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+ sslbis->data_type);
xa_for_each(&port->dports, index, dport) {
if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
- dsp_id == dport->port_id)
- cxl_access_coordinate_set(&dport->sw_coord,
+ dsp_id == dport->port_id) {
+ cxl_access_coordinate_set(dport->coord,
sslbis->data_type,
val);
+ }
}
}
@@ -493,6 +514,21 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+static void __cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2)
+{
+ if (c1->write_bandwidth && c2->write_bandwidth)
+ out->write_bandwidth = min(c1->write_bandwidth,
+ c2->write_bandwidth);
+ out->write_latency = c1->write_latency + c2->write_latency;
+
+ if (c1->read_bandwidth && c2->read_bandwidth)
+ out->read_bandwidth = min(c1->read_bandwidth,
+ c2->read_bandwidth);
+ out->read_latency = c1->read_latency + c2->read_latency;
+}
+
/**
* cxl_coordinates_combine - Combine the two input coordinates
*
@@ -504,15 +540,8 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c1,
struct access_coordinate *c2)
{
- if (c1->write_bandwidth && c2->write_bandwidth)
- out->write_bandwidth = min(c1->write_bandwidth,
- c2->write_bandwidth);
- out->write_latency = c1->write_latency + c2->write_latency;
-
- if (c1->read_bandwidth && c2->read_bandwidth)
- out->read_bandwidth = min(c1->read_bandwidth,
- c2->read_bandwidth);
- out->read_latency = c1->read_latency + c2->read_latency;
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ __cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
}
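
The combination rule is the minimum of the two hops' bandwidth (a zero, meaning "unknown", is skipped) and additive latency, applied per access class; a worked example for one class:

    /*
     * c1  = { read_bw 8000,  write_bw 4000, read_lat 200, write_lat 100 }
     * c2  = { read_bw 16000, write_bw 2000, read_lat 300, write_lat 150 }
     * out = { read_bw 8000,  write_bw 2000, read_lat 500, write_lat 250 }
     */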
MODULE_IMPORT_NS(CXL);
@@ -521,17 +550,13 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_port *port = cxlmd->endpoint;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
- struct access_coordinate coord;
struct range dpa = {
.start = cxled->dpa_res->start,
.end = cxled->dpa_res->end,
};
struct cxl_dpa_perf *perf;
- int rc;
switch (cxlr->mode) {
case CXL_DECODER_RAM:
@@ -549,35 +574,16 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
if (!range_contains(&perf->dpa_range, &dpa))
return;
- rc = cxl_hb_get_perf_coordinates(port, hb_coord);
- if (rc) {
- dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
- return;
- }
-
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
- /* Pickup the host bridge coords */
- cxl_coordinates_combine(&coord, &hb_coord[i], &perf->coord);
-
/* Get total bandwidth and the worst latency for the cxl region */
cxlr->coord[i].read_latency = max_t(unsigned int,
cxlr->coord[i].read_latency,
- coord.read_latency);
+ perf->coord[i].read_latency);
cxlr->coord[i].write_latency = max_t(unsigned int,
cxlr->coord[i].write_latency,
- coord.write_latency);
- cxlr->coord[i].read_bandwidth += coord.read_bandwidth;
- cxlr->coord[i].write_bandwidth += coord.write_bandwidth;
-
- /*
- * Convert latency to nanosec from picosec to be consistent
- * with the resulting latency coordinates computed by the
- * HMAT_REPORTING code.
- */
- cxlr->coord[i].read_latency =
- DIV_ROUND_UP(cxlr->coord[i].read_latency, 1000);
- cxlr->coord[i].write_latency =
- DIV_ROUND_UP(cxlr->coord[i].write_latency, 1000);
+ perf->coord[i].write_latency);
+ cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
+ cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
}
}
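
The aggregation above takes the worst-case (maximum) latency and the summed bandwidth across the region's endpoints; a worked example:

    /*
     * Two interleaved endpoints with
     *   perf A = { read_lat 400, read_bw 8000 }
     *   perf B = { read_lat 600, read_bw 8000 }
     * yield region read_lat = max(400, 600) = 600
     * and region read_bw = 8000 + 8000 = 16000.
     */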
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 9adda4795eb7..65185c9fa001 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -915,7 +915,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
payload->handles[i++] = gen->hdr.handle;
dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
- le16_to_cpu(payload->handles[i]));
+ le16_to_cpu(payload->handles[i - 1]));
if (i == max_handles) {
payload->nr_recs = i;
@@ -946,24 +946,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
- struct cxl_mbox_cmd mbox_cmd;
u8 log_type = type;
u16 nr_rec;
mutex_lock(&mds->event.log_lock);
payload = mds->event.buf;
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
- .payload_in = &log_type,
- .size_in = sizeof(log_type),
- .payload_out = payload,
- .size_out = mds->payload_size,
- .min_out = struct_size(payload, records, 0),
- };
-
do {
int rc, i;
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+ .payload_in = &log_type,
+ .size_in = sizeof(log_type),
+ .payload_out = payload,
+ .size_out = mds->payload_size,
+ .min_out = struct_size(payload, records, 0),
+ };
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
@@ -1296,7 +1294,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
- struct cxl_mbox_cmd mbox_cmd;
int nr_records = 0;
int rc;
@@ -1308,16 +1305,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
pi.offset = cpu_to_le64(offset);
pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_POISON,
- .size_in = sizeof(pi),
- .payload_in = &pi,
- .size_out = mds->payload_size,
- .payload_out = po,
- .min_out = struct_size(po, record, 0),
- };
-
do {
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_GET_POISON,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = mds->payload_size,
+ .payload_out = po,
+ .min_out = struct_size(po, record, 0),
+ };
+
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
break;
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 2b0cab556072..887ed6e358fb 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -2133,36 +2133,44 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
-/**
- * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
- * and host bridge
- *
- * @port: endpoint cxl_port
- * @coord: output access coordinates
- *
- * Return: errno on failure, 0 on success.
- */
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
- struct access_coordinate *coord)
+static void add_latency(struct access_coordinate *c, long latency)
{
- struct cxl_port *iter = port;
- struct cxl_dport *dport;
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].write_latency += latency;
+ c[i].read_latency += latency;
+ }
+}
- if (!is_cxl_endpoint(port))
- return -EINVAL;
+static bool coordinates_valid(struct access_coordinate *c)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ if (c[i].read_bandwidth && c[i].write_bandwidth &&
+ c[i].read_latency && c[i].write_latency)
+ continue;
+ return false;
+ }
- dport = iter->parent_dport;
- while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
- iter = to_cxl_port(iter->dev.parent);
- dport = iter->parent_dport;
+ return true;
+}
+
+static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
+ c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
}
+}
- coord[ACCESS_COORDINATE_LOCAL] =
- dport->hb_coord[ACCESS_COORDINATE_LOCAL];
- coord[ACCESS_COORDINATE_CPU] =
- dport->hb_coord[ACCESS_COORDINATE_CPU];
+static void set_access_coordinates(struct access_coordinate *out,
+ struct access_coordinate *in)
+{
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+ out[i] = in[i];
+}
- return 0;
+static bool parent_port_is_cxl_root(struct cxl_port *port)
+{
+ return is_cxl_root(to_cxl_port(port->dev.parent));
}
/**
@@ -2176,47 +2184,76 @@ int cxl_hb_get_perf_coordinates(struct cxl_port *port,
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord)
{
- struct access_coordinate c = {
- .read_bandwidth = UINT_MAX,
- .write_bandwidth = UINT_MAX,
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct access_coordinate c[] = {
+ {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ },
+ {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ },
};
struct cxl_port *iter = port;
struct cxl_dport *dport;
struct pci_dev *pdev;
+ struct device *dev;
unsigned int bw;
+ bool is_cxl_root;
if (!is_cxl_endpoint(port))
return -EINVAL;
- dport = iter->parent_dport;
-
/*
- * Exit the loop when the parent port of the current port is cxl root.
- * The iterative loop starts at the endpoint and gathers the
- * latency of the CXL link from the current iter to the next downstream
- * port each iteration. If the parent is cxl root then there is
- * nothing to gather.
+ * Skip the calculation for an RCD. The expectation is that HMAT
+ * already covers the RCD case, since an RCH does not support hotplug.
*/
- while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
- cxl_coordinates_combine(&c, &c, &dport->sw_coord);
- c.write_latency += dport->link_latency;
- c.read_latency += dport->link_latency;
+ if (cxlmd->cxlds->rcd)
+ return 0;
- iter = to_cxl_port(iter->dev.parent);
+ /*
+ * Exit the loop when the parent of the current iterator port is the
+ * cxl root. The loop starts at the endpoint and, on each iteration,
+ * gathers the latency of the CXL link from the current device/port to
+ * the connected downstream port.
+ */
+ do {
dport = iter->parent_dport;
- }
+ iter = to_cxl_port(iter->dev.parent);
+ is_cxl_root = parent_port_is_cxl_root(iter);
+
+ /*
+ * There is no valid access_coordinate for a root port since RPs do
+ * not have CDAT; they therefore need to be skipped.
+ */
+ if (!is_cxl_root) {
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+ cxl_coordinates_combine(c, c, dport->coord);
+ }
+ add_latency(c, dport->link_latency);
+ } while (!is_cxl_root);
+
+ dport = iter->parent_dport;
+ /* Retrieve HB coords */
+ if (!coordinates_valid(dport->coord))
+ return -EINVAL;
+ cxl_coordinates_combine(c, c, dport->coord);
+
+ dev = port->uport_dev->parent;
+ if (!dev_is_pci(dev))
+ return -ENODEV;
/* Get the calculated PCI paths bandwidth */
- pdev = to_pci_dev(port->uport_dev->parent);
+ pdev = to_pci_dev(dev);
bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
if (bw == 0)
return -ENXIO;
bw /= BITS_PER_BYTE;
- c.write_bandwidth = min(c.write_bandwidth, bw);
- c.read_bandwidth = min(c.read_bandwidth, bw);
-
- *coord = c;
+ set_min_bandwidth(c, bw);
+ set_access_coordinates(coord, c);
return 0;
}
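
One hedged note on units: pcie_bandwidth_available() reports the narrowest link limit in Mb/s, so the BITS_PER_BYTE division converts it to the MB/s scale used by struct access_coordinate before clamping:

    /*
     * Worked example (approximate): a x4 Gen4 link reports about
     * 4 * 15754 = 63016 Mb/s; bw /= BITS_PER_BYTE gives ~7877 MB/s,
     * which set_min_bandwidth() applies to both access classes.
     */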
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 372786f80955..3c42f984eeaf 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
{
+ u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
u64 offset = ((u64)reg_hi << 32) |
(reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
if (offset > pci_resource_len(pdev, bar)) {
dev_warn(&pdev->dev,
"BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
- &pdev->resource[bar], &offset, map->reg_type);
+ &pdev->resource[bar], &offset, reg_type);
return false;
}
- map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+ map->reg_type = reg_type;
map->resource = pci_resource_start(pdev, bar) + offset;
map->max_size = pci_resource_len(pdev, bar) - offset;
return true;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 534e25e2f0a4..036d17db68e0 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -663,8 +663,7 @@ struct cxl_rcrb_info {
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
* @regs: Dport parsed register blocks
- * @sw_coord: access coordinates (performance) for switch from CDAT
- * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @coord: access coordinates (bandwidth and latency performance attributes)
* @link_latency: calculated PCIe downstream latency
*/
struct cxl_dport {
@@ -675,8 +674,7 @@ struct cxl_dport {
bool rch;
struct cxl_port *port;
struct cxl_regs regs;
- struct access_coordinate sw_coord;
- struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
long link_latency;
};
@@ -884,8 +882,6 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
- struct access_coordinate *coord);
void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 20fb3b35e89e..36cee9c30ceb 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -401,7 +401,7 @@ enum cxl_devtype {
*/
struct cxl_dpa_perf {
struct range dpa_range;
- struct access_coordinate coord;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
int qos_class;
};
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 93ebedc5ec8c..c24ef4d3cf31 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -377,7 +377,7 @@ static const struct file_operations dax_fops = {
.release = dax_release,
.get_unmapped_area = dax_get_unmapped_area,
.mmap = dax_mmap,
- .mmap_supported_flags = MAP_SYNC,
+ .fop_flags = FOP_MMAP_SYNC,
};
static void dev_dax_cdev_del(void *cdev)
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index c1cc23bcb995..5edc522f715c 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -275,18 +275,16 @@ static int exynos_nocp_probe(struct platform_device *pdev)
return 0;
}
-static int exynos_nocp_remove(struct platform_device *pdev)
+static void exynos_nocp_remove(struct platform_device *pdev)
{
struct exynos_nocp *nocp = platform_get_drvdata(pdev);
clk_disable_unprepare(nocp->clk);
-
- return 0;
}
static struct platform_driver exynos_nocp_driver = {
.probe = exynos_nocp_probe,
- .remove = exynos_nocp_remove,
+ .remove_new = exynos_nocp_remove,
.driver = {
.name = "exynos-nocp",
.of_match_table = exynos_nocp_id_match,
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 56bac4702006..7002df20a49e 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -692,18 +692,16 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
return 0;
}
-static int exynos_ppmu_remove(struct platform_device *pdev)
+static void exynos_ppmu_remove(struct platform_device *pdev)
{
struct exynos_ppmu *info = platform_get_drvdata(pdev);
clk_disable_unprepare(info->ppmu.clk);
-
- return 0;
}
static struct platform_driver exynos_ppmu_driver = {
.probe = exynos_ppmu_probe,
- .remove = exynos_ppmu_remove,
+ .remove_new = exynos_ppmu_remove,
.driver = {
.name = "exynos-ppmu",
.of_match_table = exynos_ppmu_id_match,
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 245898f1a88e..00118580905a 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -467,7 +467,6 @@ static void exynos_bus_shutdown(struct platform_device *pdev)
devfreq_suspend_device(bus->devfreq);
}
-#ifdef CONFIG_PM_SLEEP
static int exynos_bus_resume(struct device *dev)
{
struct exynos_bus *bus = dev_get_drvdata(dev);
@@ -495,11 +494,9 @@ static int exynos_bus_suspend(struct device *dev)
return 0;
}
-#endif
-static const struct dev_pm_ops exynos_bus_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(exynos_bus_pm,
+ exynos_bus_suspend, exynos_bus_resume);
static const struct of_device_id exynos_bus_of_match[] = {
{ .compatible = "samsung,exynos-bus", },
@@ -512,7 +509,7 @@ static struct platform_driver exynos_bus_platdrv = {
.shutdown = exynos_bus_shutdown,
.driver = {
.name = "exynos-bus",
- .pm = &exynos_bus_pm,
+ .pm = pm_sleep_ptr(&exynos_bus_pm),
.of_match_table = exynos_bus_of_match,
},
};
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 11bc3d03494c..7ad5225b0381 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -392,7 +392,7 @@ out_free_resources:
return ret;
}
-static int mtk_ccifreq_remove(struct platform_device *pdev)
+static void mtk_ccifreq_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_ccifreq_drv *drv;
@@ -405,8 +405,6 @@ static int mtk_ccifreq_remove(struct platform_device *pdev)
regulator_disable(drv->proc_reg);
if (drv->sram_reg)
regulator_disable(drv->sram_reg);
-
- return 0;
}
static const struct mtk_ccifreq_platform_data mt8183_platform_data = {
@@ -432,7 +430,7 @@ MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines);
static struct platform_driver mtk_ccifreq_platdrv = {
.probe = mtk_ccifreq_probe,
- .remove = mtk_ccifreq_remove,
+ .remove_new = mtk_ccifreq_remove,
.driver = {
.name = "mtk-ccifreq",
.of_match_table = mtk_ccifreq_machines,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index fd2c5ffedf41..d405cee92c25 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -459,13 +459,11 @@ err_edev:
return ret;
}
-static int rk3399_dmcfreq_remove(struct platform_device *pdev)
+static void rk3399_dmcfreq_remove(struct platform_device *pdev)
{
struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
devfreq_event_disable_edev(dmcfreq->edev);
-
- return 0;
}
static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
@@ -476,7 +474,7 @@ MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
static struct platform_driver rk3399_dmcfreq_driver = {
.probe = rk3399_dmcfreq_probe,
- .remove = rk3399_dmcfreq_remove,
+ .remove_new = rk3399_dmcfreq_remove,
.driver = {
.name = "rk3399-dmc-freq",
.pm = &rk3399_dmcfreq_pm,
diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c
index 13d32213139f..bcf654f4ff96 100644
--- a/drivers/devfreq/sun8i-a33-mbus.c
+++ b/drivers/devfreq/sun8i-a33-mbus.c
@@ -458,7 +458,7 @@ err_disable_bus:
return dev_err_probe(dev, ret, err);
}
-static int sun8i_a33_mbus_remove(struct platform_device *pdev)
+static void sun8i_a33_mbus_remove(struct platform_device *pdev)
{
struct sun8i_a33_mbus *priv = platform_get_drvdata(pdev);
unsigned long initial_freq = priv->profile.initial_freq;
@@ -475,8 +475,6 @@ static int sun8i_a33_mbus_remove(struct platform_device *pdev)
clk_rate_exclusive_put(priv->clk_mbus);
clk_rate_exclusive_put(priv->clk_dram);
clk_disable_unprepare(priv->clk_bus);
-
- return 0;
}
static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = {
@@ -497,7 +495,7 @@ static SIMPLE_DEV_PM_OPS(sun8i_a33_mbus_pm_ops,
static struct platform_driver sun8i_a33_mbus_driver = {
.probe = sun8i_a33_mbus_probe,
- .remove = sun8i_a33_mbus_remove,
+ .remove_new = sun8i_a33_mbus_remove,
.driver = {
.name = "sun8i-a33-mbus",
.of_match_table = sun8i_a33_mbus_of_match,
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969d7d..1398814d8fbb 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* The IRQ may be shared, so check whether the DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 8078ab9acfbc..39935071174a 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- spin_unlock(&evl->lock);
-
drain_workqueue(wq->wq);
+ mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -401,6 +400,18 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
int rc;
dev_dbg(&pdev->dev, "%s called\n", __func__);
+
+ /*
+ * Due to an erratum in some of the devices supported by the driver,
+ * direct user submission to the device can be unsafe.
+ * (See the INTEL-SA-01084 security advisory)
+ *
+ * For the devices that exhibit this behavior, require that the user
+ * has the CAP_SYS_RAWIO capability.
+ */
+ if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@@ -415,6 +426,70 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_page_prot);
}
+static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
+ struct dsa_hw_desc __user *udesc)
+{
+ struct idxd_wq *wq = ctx->wq;
+ struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
+ const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
+ void __iomem *portal = idxd_wq_portal_addr(wq);
+ struct dsa_hw_desc descriptor __aligned(64);
+ int rc;
+
+ rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
+ if (rc)
+ return -EFAULT;
+
+ /*
+ * DSA devices are capable of indirect ("batch") command submission.
+ * On devices where direct user submissions are not safe, we cannot
+ * allow this since there is no good way for us to verify these
+ * indirect commands.
+ */
+ if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
+ !wq->idxd->user_submission_safe)
+ return -EINVAL;
+ /*
+ * As per the programming specification, the completion address must be
+ * aligned to 32 or 64 bytes. If this is violated the hardware
+ * engine can get very confused (security issue).
+ */
+ if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
+ return -EINVAL;
+
+ if (wq_dedicated(wq))
+ iosubmit_cmds512(portal, &descriptor, 1);
+ else {
+ descriptor.priv = 0;
+ descriptor.pasid = ctx->pasid;
+ rc = idxd_enqcmds(wq, portal, &descriptor);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
+ loff_t *unused)
+{
+ struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
+ struct idxd_user_context *ctx = filp->private_data;
+ ssize_t written = 0;
+ int i;
+
+ for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
+ int rc = idxd_submit_user_descriptor(ctx, udesc + i);
+
+ if (rc)
+ return written ? written : rc;
+
+ written += sizeof(struct dsa_hw_desc);
+ }
+
+ return written;
+}
+
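
Editor's note: a hedged userspace sketch of the new write() submission path, assuming the uapi definitions in <linux/idxd.h>; the device path, NOOP opcode, and buffer handling are illustrative only, not a complete SVM setup:

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/idxd.h>

static int submit_noop(const char *cdev_path)
{
	struct dsa_hw_desc desc;
	void *cr = aligned_alloc(32, 32);	/* DSA: 32-byte aligned completion record */
	int fd = open(cdev_path, O_RDWR);

	if (fd < 0 || !cr)
		return -1;

	memset(&desc, 0, sizeof(desc));
	desc.opcode = DSA_OPCODE_NOOP;
	desc.flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	desc.completion_addr = (uint64_t)(uintptr_t)cr;

	/* one descriptor per sizeof(struct dsa_hw_desc) written */
	if (write(fd, &desc, sizeof(desc)) != (ssize_t)sizeof(desc)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	/* path is hypothetical; real nodes look like /dev/dsa/wq0.0 */
	return submit_noop("/dev/dsa/wq0.0") ? 1 : 0;
}
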
static __poll_t idxd_cdev_poll(struct file *filp,
struct poll_table_struct *wait)
{
@@ -437,6 +512,7 @@ static const struct file_operations idxd_cdev_fops = {
.open = idxd_cdev_open,
.release = idxd_cdev_release,
.mmap = idxd_cdev_mmap,
+ .write = idxd_cdev_write,
.poll = idxd_cdev_poll,
};
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee676f3..ad4245cb301d 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ecfdf4a8f1f8..c41ef195eeb9 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -775,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@@ -796,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
err_alloc:
@@ -819,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -836,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index a4099a1e2340..868b724a3b75 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -288,12 +288,13 @@ struct idxd_driver_data {
int evl_cr_off;
int cr_status_off;
int cr_result_off;
+ bool user_submission_safe;
load_device_defaults_fn_t load_device_defaults;
};
struct idxd_evl {
/* Lock to protect event log access. */
- spinlock_t lock;
+ struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */
@@ -374,6 +375,8 @@ struct idxd_device {
struct dentry *dbgfs_dir;
struct dentry *dbgfs_evl_file;
+
+ bool user_submission_safe;
};
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 4954adc6bb60..a7295943fa22 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -47,6 +47,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.align = 32,
.dev_type = &dsa_device_type,
.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
+ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
.cr_status_off = offsetof(struct dsa_completion_record, status),
.cr_result_off = offsetof(struct dsa_completion_record, result),
},
@@ -57,6 +58,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.align = 64,
.dev_type = &iax_device_type,
.evl_cr_off = offsetof(struct iax_evl_entry, cr),
+ .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
.cr_status_off = offsetof(struct iax_completion_record, status),
.cr_result_off = offsetof(struct iax_completion_record, error_code),
.load_device_defaults = idxd_load_iaa_device_defaults,
@@ -354,7 +356,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
- spin_lock_init(&evl->lock);
+ mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));
@@ -774,6 +776,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
+ idxd->user_submission_safe = data->user_submission_safe;
+
return 0;
err_dev_register:
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 348aa21389a9..8dc029c86551 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d604262..5e94247e1ea7 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 315c004f58e4..e16dbf9ab324 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -6,9 +6,6 @@
#include <uapi/linux/idxd.h>
/* PCI Config */
-#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
-#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
-
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 7f28f01be672..f706eae0e76b 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1197,12 +1197,35 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
static struct device_attribute dev_attr_wq_enqcmds_retries =
__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap)
+{
+ ssize_t pos;
+ int i;
+
+ pos = 0;
+ for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) {
+ unsigned long val = opcap_bmap[i];
+
+ /* On systems where direct user submissions are not safe, we need to clear out
+ * the BATCH capability from the capability mask in sysfs since we cannot support
+ * that command on such systems.
+ */
+ if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
+ clear_bit(DSA_OPCODE_BATCH % 64, &val);
+
+ pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
+ pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ',');
+ }
+
+ return pos;
+}
+
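
Editor's note: worked example of the bit arithmetic above, assuming DSA_OPCODE_BATCH is opcode 1 as in the uapi header: the batch bit lands in 64-bit word 0, bit 1, which the descending loop emits as the last comma-separated field.

#include <stdint.h>
#include <stdio.h>

#define DSA_OPCODE_BATCH 1	/* assumed value, per uapi <linux/idxd.h> */

int main(void)
{
	uint64_t opcap[4] = { 0xffULL, 0, 0, 0 };	/* toy capability map */
	int word = DSA_OPCODE_BATCH / 64;
	int bit  = DSA_OPCODE_BATCH % 64;

	opcap[word] &= ~(1ULL << bit);	/* mirrors the clear_bit() above */
	printf("word %d bit %d -> %#llx\n", word, bit,
	       (unsigned long long)opcap[word]);	/* prints 0xfd */
	return 0;
}
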
static ssize_t wq_op_config_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = confdev_to_wq(dev);
- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
+ return op_cap_show_common(dev, buf, wq->opcap_bmap);
}
static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
@@ -1455,7 +1478,7 @@ static ssize_t op_cap_show(struct device *dev,
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
+ return op_cap_show_common(dev, buf, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 4e76c4ec2d39..e001f4f7aa64 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
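
Editor's note: the one-character fix above is a classic lost update in a read-modify-write helper. A standalone demonstration, with plain memory standing in for the MMIO register:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg = 0xf0;	/* stands in for the MMIO register */

static void pchan_update(uint32_t val, int set)
{
	uint32_t regval = reg;	/* readl() */

	if (set)
		regval |= val;
	else
		regval &= ~val;
	reg = regval;		/* writel(regval, ...) keeps the 0xf0 bits */
}

int main(void)
{
	pchan_update(0x1, 1);
	printf("%#x\n", reg);	/* 0xf1; writing back `val` would give 0x1 */
	return 0;
}
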
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5f6d7f1e095f..ad8e3da1b2cd 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1053,9 +1053,6 @@ static bool _trigger(struct pl330_thread *thrd)
thrd->req_running = idx;
- if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
- UNTIL(thrd, PL330_STATE_WFP);
-
return true;
}
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 88547a23825b..3642508e88bb 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
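
Editor's note: the early return guards the modulo wrap at completion, where the old expression returned a full bytes_req instead of 0. Worked numbers (illustrative sizes):

#include <stdio.h>

int main(void)
{
	unsigned int bytes_req = 4096, bytes_xfer = 4096;
	unsigned int old = bytes_req - (bytes_xfer % bytes_req);
	unsigned int new = bytes_req == bytes_xfer ?
			   0 : bytes_req - (bytes_xfer % bytes_req);

	printf("old=%u new=%u\n", old, new);	/* old=4096 new=0 */
	return 0;
}
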
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index 98f5f6fb9ff9..6ad08878e938 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY BIT(0)
+
#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 170017ff2aad..313b217388fe 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
+ struct completion last_interrupt;
+ bool stop_requested;
};
/**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+ xchan->stop_requested = false;
+ reinit_completion(&xchan->last_interrupt);
return 0;
}
@@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
int ret;
- u32 val;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
@@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
CHAN_CTRL_RUN_STOP);
if (ret)
return ret;
-
- /* Clear the channel status register */
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
- if (ret)
- return ret;
-
- return 0;
+ return ret;
}
/**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
+ xchan->stop_requested = false;
+ init_completion(&xchan->last_interrupt);
ret = xdma_channel_init(xchan);
if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
xdma_chan->busy = false;
+ xdma_chan->stop_requested = true;
vd = vchan_next_desc(&xdma_chan->vchan);
if (vd) {
list_del(&vd->node);
@@ -542,17 +542,26 @@ static int xdma_terminate_all(struct dma_chan *chan)
static void xdma_synchronize(struct dma_chan *chan)
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ int st = 0;
+
+ /* If the engine continues running, wait for the last interrupt */
+ regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+ if (st & XDMA_CHAN_STATUS_BUSY)
+ wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
vchan_synchronize(&xdma_chan->vchan);
}
/**
- * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
- * @sw_desc: tx descriptor state container
- * @src_addr: Value for a ->src_addr field of a first descriptor
- * @dst_addr: Value for a ->dst_addr field of a first descriptor
- * @size: Total size of a contiguous memory block
- * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
+ * More than one descriptor will be used if the size is bigger
+ * than XDMA_DESC_BLEN_MAX.
+ * @sw_desc: Descriptor container
+ * @src_addr: First value for the ->src_addr field
+ * @dst_addr: First value for the ->dst_addr field
+ * @size: Size of the contiguous memory block
+ * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
*/
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
u64 dst_addr, u32 size, u32 filled_descs_num)
@@ -704,7 +713,7 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
desc_num = 0;
for (i = 0; i < periods; i++) {
desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
- addr += i * period_size;
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
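
Editor's note: the cyclic-address fix is easiest to see numerically: `addr` already accumulates, so adding `i * period_size` produces triangular offsets 0, 0, p, 3p instead of the intended 0, p, 2p, 3p. A quick check, with an illustrative period size:

#include <stdio.h>

int main(void)
{
	unsigned long p = 0x1000, old = 0, fixed = 0;

	for (int i = 0; i < 4; i++) {
		printf("period %d: old=%#lx fixed=%#lx\n", i, old, fixed);
		old += i * p;	/* buggy: addr += i * period_size */
		fixed += p;	/* fixed: addr += period_size     */
	}
	return 0;
}
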
@@ -876,6 +885,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ if (xchan->stop_requested)
+ complete(&xchan->last_interrupt);
+
spin_lock(&xchan->vchan.lock);
/* get submitted request */
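
Editor's note: taken together, stop_requested, last_interrupt, and the busy check in xdma_synchronize() implement a wait-for-final-interrupt handshake. A simplified pthread analogue, with a condition variable plus flag standing in for the kernel completion (an assumption, not the driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t last_interrupt = PTHREAD_COND_INITIALIZER;
static bool stop_requested, completed;

static void *isr(void *arg)
{
	pthread_mutex_lock(&lock);
	if (stop_requested) {		/* mirrors complete(&last_interrupt) */
		completed = true;
		pthread_cond_signal(&last_interrupt);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	stop_requested = true;		/* xdma_terminate_all() */
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, isr, NULL);	/* the final IRQ fires */

	pthread_mutex_lock(&lock);		/* xdma_synchronize() */
	while (!completed)
		pthread_cond_wait(&last_interrupt, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	puts("last interrupt seen; safe to tear down");
	return 0;
}
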
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b82815e64d24..eb0637d90342 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 64eaca80d736..d0f6693ca142 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -42,6 +42,7 @@ struct dpll_pin_registration {
struct list_head list;
const struct dpll_pin_ops *ops;
void *priv;
+ void *cookie;
};
struct dpll_device *dpll_device_get_by_id(int id)
@@ -54,12 +55,14 @@ struct dpll_device *dpll_device_get_by_id(int id)
static struct dpll_pin_registration *
dpll_pin_registration_find(struct dpll_pin_ref *ref,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
list_for_each_entry(reg, &ref->registration_list, list) {
- if (reg->ops == ops && reg->priv == priv)
+ if (reg->ops == ops && reg->priv == priv &&
+ reg->cookie == cookie)
return reg;
}
return NULL;
@@ -67,7 +70,8 @@ dpll_pin_registration_find(struct dpll_pin_ref *ref,
static int
dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -78,7 +82,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -111,6 +115,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -119,7 +124,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -128,7 +134,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return -EINVAL;
list_del(&reg->list);
@@ -146,7 +152,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
static int
dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -157,7 +163,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -190,6 +196,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -199,7 +206,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
static void
dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -208,7 +215,7 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return;
list_del(&reg->list);
@@ -594,14 +601,14 @@ EXPORT_SYMBOL_GPL(dpll_pin_put);
static int
__dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
int ret;
- ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv);
+ ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv, cookie);
if (ret)
return ret;
- ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv);
+ ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv, cookie);
if (ret)
goto ref_pin_del;
xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
@@ -610,7 +617,7 @@ __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
return ret;
ref_pin_del:
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
return ret;
}
@@ -642,7 +649,7 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
dpll->clock_id == pin->clock_id)))
ret = -EINVAL;
else
- ret = __dpll_pin_register(dpll, pin, ops, priv);
+ ret = __dpll_pin_register(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
return ret;
@@ -651,11 +658,11 @@ EXPORT_SYMBOL_GPL(dpll_pin_register);
static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
ASSERT_DPLL_PIN_REGISTERED(pin);
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
- dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
+ dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
if (xa_empty(&pin->dpll_refs))
xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
}
@@ -680,7 +687,7 @@ void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- __dpll_pin_unregister(dpll, pin, ops, priv);
+ __dpll_pin_unregister(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_unregister);
@@ -716,12 +723,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
return -EINVAL;
mutex_lock(&dpll_lock);
- ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+ ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
if (ret)
goto unlock;
refcount_inc(&pin->refcount);
xa_for_each(&parent->dpll_refs, i, ref) {
- ret = __dpll_pin_register(ref->dpll, pin, ops, priv);
+ ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
if (ret) {
stop = i;
goto dpll_unregister;
@@ -735,11 +742,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
dpll_unregister:
xa_for_each(&parent->dpll_refs, i, ref)
if (i < stop) {
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv,
+ parent);
dpll_pin_delete_ntf(pin);
}
refcount_dec(&pin->refcount);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
unlock:
mutex_unlock(&dpll_lock);
return ret;
@@ -764,10 +772,10 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
refcount_dec(&pin->refcount);
xa_for_each(&pin->dpll_refs, i, ref)
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
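
Editor's note on why the cookie is needed: when one pin is registered through two different parents with identical ops/priv, the old (ops, priv) lookup matched the first registration and merely bumped its refcount. A toy model of the disambiguated lookup (simplified, fixed-size registry; not the kernel data structures):

#include <stdio.h>

struct reg { const void *ops, *priv, *cookie; };

static struct reg regs[8];
static int nregs;

static struct reg *find(const void *ops, const void *priv, const void *cookie)
{
	for (int i = 0; i < nregs; i++)
		if (regs[i].ops == ops && regs[i].priv == priv &&
		    regs[i].cookie == cookie)
			return &regs[i];
	return NULL;
}

static void add(const void *ops, const void *priv, const void *cookie)
{
	if (find(ops, priv, cookie)) {
		puts("matched existing registration");	/* refcount bump */
		return;
	}
	regs[nregs++] = (struct reg){ ops, priv, cookie };
	puts("new registration");
}

int main(void)
{
	int ops, priv, parent_a, parent_b;

	add(&ops, &priv, &parent_a);	/* new registration */
	add(&ops, &priv, &parent_b);	/* new: the cookie tells the parents apart */
	return 0;
}
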
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index ae17ce4d9722..fe89f5c4837f 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -737,8 +737,7 @@ static int altr_edac_device_probe(struct platform_device *pdev)
}
dci = edac_device_alloc_ctl_info(sizeof(*drvdata), ecc_name,
- 1, ecc_name, 1, 0, NULL, 0,
- dev_instance++);
+ 1, ecc_name, 1, 0, dev_instance++);
if (!dci) {
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -1514,7 +1513,7 @@ static int altr_portb_setup(struct altr_edac_device_dev *device)
/* Create the PortB EDAC device */
edac_idx = edac_device_alloc_index();
dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name, 1,
- ecc_name, 1, 0, NULL, 0, edac_idx);
+ ecc_name, 1, 0, edac_idx);
if (!dci) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: Unable to allocate PortB EDAC device\n",
@@ -1921,8 +1920,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
edac_idx = edac_device_alloc_index();
dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name,
- 1, ecc_name, 1, 0, NULL, 0,
- edac_idx);
+ 1, ecc_name, 1, 0, edac_idx);
if (!dci) {
edac_printk(KERN_ERR, EDAC_DEVICE,
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 1665f7932bac..b879b12971e7 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -345,7 +345,6 @@ struct amd64_pvt {
u32 dchr1; /* DRAM Configuration High DCT1 reg */
u32 nbcap; /* North Bridge Capabilities */
u32 nbcfg; /* F10 North Bridge Configuration */
- u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */
u32 dhar; /* DRAM Hoist reg */
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index ca718f63fcbc..a6d3013d5823 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -366,8 +366,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
dev_info->edac_idx = edac_device_alloc_index();
dev_info->edac_dev =
edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
- NULL, 0, 0,
- NULL, 0, dev_info->edac_idx);
+ NULL, 0, 0, dev_info->edac_idx);
if (!dev_info->edac_dev) {
ret = -ENOMEM;
goto err_dev_put;
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index 25517c99b3ea..589bc81f1249 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -523,7 +523,7 @@ static int aurora_l2_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "data ECC is not enabled\n");
dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
- "cpu", 1, "L", 1, 2, NULL, 0, 0);
+ "cpu", 1, "L", 1, 2, 0);
if (!dci)
return -ENOMEM;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 5075dc7526e3..eb702bc3aa29 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -797,7 +797,7 @@ static void cpc925_add_edac_devices(void __iomem *vbase)
dev_info->edac_idx = edac_device_alloc_index();
dev_info->edac_dev =
edac_device_alloc_ctl_info(0, dev_info->ctl_name,
- 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
+ 1, NULL, 0, 0, dev_info->edac_idx);
if (!dev_info->edac_dev) {
cpc925_printk(KERN_ERR, "No memory for edac device\n");
goto err1;
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 0689e1510721..621dc2a5d034 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -56,14 +56,12 @@ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
struct edac_device_ctl_info *
edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instances,
char *blk_name, unsigned nr_blocks, unsigned off_val,
- struct edac_dev_sysfs_block_attribute *attrib_spec,
- unsigned nr_attrib, int device_index)
+ int device_index)
{
- struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
struct edac_device_block *dev_blk, *blk_p, *blk;
struct edac_device_instance *dev_inst, *inst;
struct edac_device_ctl_info *dev_ctl;
- unsigned instance, block, attr;
+ unsigned instance, block;
void *pvt;
int err;
@@ -85,15 +83,6 @@ edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instance
dev_ctl->blocks = dev_blk;
- if (nr_attrib) {
- dev_attrib = kcalloc(nr_attrib, sizeof(struct edac_dev_sysfs_block_attribute),
- GFP_KERNEL);
- if (!dev_attrib)
- goto free;
-
- dev_ctl->attribs = dev_attrib;
- }
-
if (pvt_sz) {
pvt = kzalloc(pvt_sz, GFP_KERNEL);
if (!pvt)
@@ -132,44 +121,6 @@ edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instance
edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
instance, inst, block, blk, blk->name);
-
- /* if there are NO attributes OR no attribute pointer
- * then continue on to next block iteration
- */
- if ((nr_attrib == 0) || (attrib_spec == NULL))
- continue;
-
- /* setup the attribute array for this block */
- blk->nr_attribs = nr_attrib;
- attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
- blk->block_attributes = attrib_p;
-
- edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
- blk->block_attributes);
-
- /* Initialize every user specified attribute in this
- * block with the data the caller passed in
- * Each block gets its own copy of pointers,
- * and its unique 'value'
- */
- for (attr = 0; attr < nr_attrib; attr++) {
- attrib = &attrib_p[attr];
-
- /* populate the unique per attrib
- * with the code pointers and info
- */
- attrib->attr = attrib_spec[attr].attr;
- attrib->show = attrib_spec[attr].show;
- attrib->store = attrib_spec[attr].store;
-
- attrib->block = blk; /* up link */
-
- edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
- attrib, attrib->attr.name,
- &attrib_spec[attr],
- attrib_spec[attr].attr.name
- );
- }
}
}
diff --git a/drivers/edac/edac_device.h b/drivers/edac/edac_device.h
index 7db22a4c83ef..034711d71ebf 100644
--- a/drivers/edac/edac_device.h
+++ b/drivers/edac/edac_device.h
@@ -22,7 +22,6 @@
#ifndef _EDAC_DEVICE_H_
#define _EDAC_DEVICE_H_
-#include <linux/completion.h>
#include <linux/device.h>
#include <linux/edac.h>
#include <linux/kobject.h>
@@ -95,22 +94,13 @@ struct edac_dev_sysfs_attribute {
*
* used in leaf 'block' nodes for adding controls/attributes
*
- * each block in each instance of the containing control structure
- * can have an array of the following. The show and store functions
- * will be filled in with the show/store function in the
- * low level driver.
- *
- * The 'value' field will be the actual value field used for
- * counting
+ * each block in each instance of the containing control structure can
+ * have an array of the following. The show function will be filled in
+ * with the show function in the low level driver.
*/
struct edac_dev_sysfs_block_attribute {
struct attribute attr;
ssize_t (*show)(struct kobject *, struct attribute *, char *);
- ssize_t (*store)(struct kobject *, struct attribute *,
- const char *, size_t);
- struct edac_device_block *block;
-
- unsigned int value;
};
/* device block control structure */
@@ -200,8 +190,6 @@ struct edac_device_ctl_info {
unsigned long start_time; /* edac_device load start time (jiffies) */
- struct completion removal_complete;
-
/* sysfs top name under 'edac' directory
* and instance name:
* cpu/cpu0/...
@@ -217,7 +205,6 @@ struct edac_device_ctl_info {
u32 nr_instances;
struct edac_device_instance *instances;
struct edac_device_block *blocks;
- struct edac_dev_sysfs_block_attribute *attribs;
/* Event counters for the this whole EDAC Device */
struct edac_device_counter counters;
@@ -245,8 +232,6 @@ extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
char *edac_device_name, unsigned nr_instances,
char *edac_block_name, unsigned nr_blocks,
unsigned offset_value,
- struct edac_dev_sysfs_block_attribute *block_attributes,
- unsigned nr_attribs,
int device_index);
/* The offset value can be:
@@ -356,7 +341,6 @@ static inline void __edac_device_free_ctl_info(struct edac_device_ctl_info *ci)
{
if (ci) {
kfree(ci->pvt_info);
- kfree(ci->attribs);
kfree(ci->blocks);
kfree(ci->instances);
kfree(ci);
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 237a542e045a..fcebc4ffea26 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -457,35 +457,19 @@ static ssize_t edac_dev_block_show(struct kobject *kobj,
return -EIO;
}
-/* Function to 'store' fields into the edac_dev 'block' structure */
-static ssize_t edac_dev_block_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct edac_dev_sysfs_block_attribute *block_attr;
-
- block_attr = to_block_attr(attr);
-
- if (block_attr->store)
- return block_attr->store(kobj, attr, buffer, count);
- return -EIO;
-}
-
/* edac_dev file operations for a 'block' */
static const struct sysfs_ops device_block_ops = {
.show = edac_dev_block_show,
- .store = edac_dev_block_store
};
-#define BLOCK_ATTR(_name,_mode,_show,_store) \
+#define BLOCK_ATTR(_name,_mode,_show) \
static struct edac_dev_sysfs_block_attribute attr_block_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
- .store = _store, \
};
-BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
-BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
+BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show);
+BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show);
/* list of edac_dev 'block' attributes */
static struct attribute *device_block_attrs[] = {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 5116873c3330..4200aec04831 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -146,7 +146,7 @@ static ssize_t csrow_ue_count_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%u\n", csrow->ue_count);
+ return sysfs_emit(data, "%u\n", csrow->ue_count);
}
static ssize_t csrow_ce_count_show(struct device *dev,
@@ -154,7 +154,7 @@ static ssize_t csrow_ce_count_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%u\n", csrow->ce_count);
+ return sysfs_emit(data, "%u\n", csrow->ce_count);
}
static ssize_t csrow_size_show(struct device *dev,
@@ -166,7 +166,7 @@ static ssize_t csrow_size_show(struct device *dev,
for (i = 0; i < csrow->nr_channels; i++)
nr_pages += csrow->channels[i]->dimm->nr_pages;
- return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
+ return sysfs_emit(data, "%u\n", PAGES_TO_MiB(nr_pages));
}
static ssize_t csrow_mem_type_show(struct device *dev,
@@ -174,7 +174,7 @@ static ssize_t csrow_mem_type_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
+ return sysfs_emit(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
}
static ssize_t csrow_dev_type_show(struct device *dev,
@@ -182,7 +182,7 @@ static ssize_t csrow_dev_type_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
+ return sysfs_emit(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
}
static ssize_t csrow_edac_mode_show(struct device *dev,
@@ -191,7 +191,7 @@ static ssize_t csrow_edac_mode_show(struct device *dev,
{
struct csrow_info *csrow = to_csrow(dev);
- return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
+ return sysfs_emit(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
@@ -207,8 +207,7 @@ static ssize_t channel_dimm_label_show(struct device *dev,
if (!rank->dimm->label[0])
return 0;
- return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n",
- rank->dimm->label);
+ return sysfs_emit(data, "%s\n", rank->dimm->label);
}
static ssize_t channel_dimm_label_store(struct device *dev,
@@ -243,7 +242,7 @@ static ssize_t channel_ce_count_show(struct device *dev,
unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
- return sprintf(data, "%u\n", rank->ce_count);
+ return sysfs_emit(data, "%u\n", rank->ce_count);
}
/* cwrow<id>/attribute files */
@@ -515,7 +514,7 @@ static ssize_t dimmdev_label_show(struct device *dev,
if (!dimm->label[0])
return 0;
- return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label);
+ return sysfs_emit(data, "%s\n", dimm->label);
}
static ssize_t dimmdev_label_store(struct device *dev,
@@ -546,7 +545,7 @@ static ssize_t dimmdev_size_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
+ return sysfs_emit(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
}
static ssize_t dimmdev_mem_type_show(struct device *dev,
@@ -554,7 +553,7 @@ static ssize_t dimmdev_mem_type_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%s\n", edac_mem_types[dimm->mtype]);
+ return sysfs_emit(data, "%s\n", edac_mem_types[dimm->mtype]);
}
static ssize_t dimmdev_dev_type_show(struct device *dev,
@@ -562,7 +561,7 @@ static ssize_t dimmdev_dev_type_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%s\n", dev_types[dimm->dtype]);
+ return sysfs_emit(data, "%s\n", dev_types[dimm->dtype]);
}
static ssize_t dimmdev_edac_mode_show(struct device *dev,
@@ -571,7 +570,7 @@ static ssize_t dimmdev_edac_mode_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
+ return sysfs_emit(data, "%s\n", edac_caps[dimm->edac_mode]);
}
static ssize_t dimmdev_ce_count_show(struct device *dev,
@@ -580,7 +579,7 @@ static ssize_t dimmdev_ce_count_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%u\n", dimm->ce_count);
+ return sysfs_emit(data, "%u\n", dimm->ce_count);
}
static ssize_t dimmdev_ue_count_show(struct device *dev,
@@ -589,7 +588,7 @@ static ssize_t dimmdev_ue_count_show(struct device *dev,
{
struct dimm_info *dimm = to_dimm(dev);
- return sprintf(data, "%u\n", dimm->ue_count);
+ return sysfs_emit(data, "%u\n", dimm->ue_count);
}
/* dimm/rank attribute files */
@@ -758,7 +757,7 @@ static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
return bandwidth;
}
- return sprintf(data, "%d\n", bandwidth);
+ return sysfs_emit(data, "%d\n", bandwidth);
}
/* default attribute files for the MCI object */
@@ -768,7 +767,7 @@ static ssize_t mci_ue_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%u\n", mci->ue_mc);
+ return sysfs_emit(data, "%u\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct device *dev,
@@ -777,7 +776,7 @@ static ssize_t mci_ce_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%u\n", mci->ce_mc);
+ return sysfs_emit(data, "%u\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct device *dev,
@@ -786,7 +785,7 @@ static ssize_t mci_ce_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%u\n", mci->ce_noinfo_count);
+ return sysfs_emit(data, "%u\n", mci->ce_noinfo_count);
}
static ssize_t mci_ue_noinfo_show(struct device *dev,
@@ -795,7 +794,7 @@ static ssize_t mci_ue_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%u\n", mci->ue_noinfo_count);
+ return sysfs_emit(data, "%u\n", mci->ue_noinfo_count);
}
static ssize_t mci_seconds_show(struct device *dev,
@@ -804,7 +803,7 @@ static ssize_t mci_seconds_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
+ return sysfs_emit(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}
static ssize_t mci_ctl_name_show(struct device *dev,
@@ -813,7 +812,7 @@ static ssize_t mci_ctl_name_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%s\n", mci->ctl_name);
+ return sysfs_emit(data, "%s\n", mci->ctl_name);
}
static ssize_t mci_size_mb_show(struct device *dev,
@@ -833,7 +832,7 @@ static ssize_t mci_size_mb_show(struct device *dev,
}
}
- return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
+ return sysfs_emit(data, "%u\n", PAGES_TO_MiB(total_pages));
}
static ssize_t mci_max_location_show(struct device *dev,
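
Editor's note: these conversions matter because sysfs_emit() bounds output to a single page and sanity-checks the buffer, which raw sprintf() does not. A userspace approximation of the guarantee (PAGE_SIZE value assumed):

#include <stdio.h>

#define PAGE_SIZE 4096

static int sysfs_emit_like(char *buf, const char *fmt, unsigned int v)
{
	return snprintf(buf, PAGE_SIZE, fmt, v);	/* bounded, unlike sprintf */
}

int main(void)
{
	char page[PAGE_SIZE];

	printf("%d bytes\n", sysfs_emit_like(page, "%u\n", 42));
	return 0;
}
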
diff --git a/drivers/edac/edac_pci.h b/drivers/edac/edac_pci.h
index 5175f5724cfa..3f47cd9b2b03 100644
--- a/drivers/edac/edac_pci.h
+++ b/drivers/edac/edac_pci.h
@@ -22,7 +22,6 @@
#ifndef _EDAC_PCI_H_
#define _EDAC_PCI_H_
-#include <linux/completion.h>
#include <linux/device.h>
#include <linux/edac.h>
#include <linux/kobject.h>
@@ -48,8 +47,6 @@ struct edac_pci_ctl_info {
int pci_idx;
- struct bus_type *edac_subsys; /* pointer to subsystem */
-
/* the internal state of this controller instance */
int op_state;
/* work struct for this instance */
@@ -72,8 +69,6 @@ struct edac_pci_ctl_info {
unsigned long start_time; /* edac_pci load start time (jiffies) */
- struct completion complete;
-
/* sysfs top name under 'edac' directory
* and instance name:
* cpu/cpu0/...
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index 5646c049a934..282ca6535f8f 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -54,7 +54,7 @@ static int highbank_l2_err_probe(struct platform_device *pdev)
int res = 0;
dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu",
- 1, "L", 1, 2, NULL, 0, 0);
+ 1, "L", 1, 2, 0);
if (!dci)
return -ENOMEM;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index c1bc53f4e184..e8945d4adbad 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -496,7 +496,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
return -ENOMEM;
edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
- "cpu", 1, "L", 1, 2, NULL, 0,
+ "cpu", 1, "L", 1, 2,
edac_dev_idx);
if (!edac_dev) {
devres_release_group(&op->dev, mpc85xx_l2_err_probe);
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index 4015eb9af6fe..919095d10528 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -138,7 +138,7 @@ static int octeon_l2c_probe(struct platform_device *pdev)
/* 'Tags' are block 0, 'Data' is block 1*/
l2c = edac_device_alloc_ctl_info(0, "l2c", num_tads, "l2c", 2, 0,
- NULL, 0, edac_device_alloc_index());
+ edac_device_alloc_index());
if (!l2c)
return -ENOMEM;
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index ea8a8e337b1e..b8404cc7b65f 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -92,7 +92,7 @@ static int co_cache_error_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
p->ed = edac_device_alloc_ctl_info(0, "cpu", num_possible_cpus(),
- "cache", 2, 0, NULL, 0,
+ "cache", 2, 0,
edac_device_alloc_index());
if (!p->ed)
goto err;
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 5539917c01dd..d3cd4cc54ace 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -349,7 +349,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
/* Allocate edac control info */
edev_ctl = edac_device_alloc_ctl_info(0, "qcom-llcc", 1, "bank",
llcc_driv_data->num_banks, 1,
- NULL, 0,
edac_device_alloc_index());
if (!edev_ctl)
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
index b844e2626fd5..a2b193dc6604 100644
--- a/drivers/edac/sifive_edac.c
+++ b/drivers/edac/sifive_edac.c
@@ -52,8 +52,7 @@ static int ecc_register(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc",
- 1, 1, NULL, 0,
- edac_device_alloc_index());
+ 1, 1, edac_device_alloc_index());
if (!p->dci)
return -ENOMEM;
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 9c5b6f8bd8bd..27996b7924c8 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -648,7 +648,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
memset(&res, 0, sizeof(res));
res.mce = mce;
res.addr = mce->addr & MCI_ADDR_PHYSADDR;
- if (!pfn_to_online_page(res.addr >> PAGE_SHIFT)) {
+ if (!pfn_to_online_page(res.addr >> PAGE_SHIFT) && !arch_is_platform_page(res.addr)) {
pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank);
return NOTIFY_DONE;
}
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 5527055b0964..ea7a9a342dd3 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -9,6 +9,7 @@
#include <linux/edac.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of.h>
@@ -299,6 +300,7 @@ struct synps_ecc_status {
/**
* struct synps_edac_priv - DDR memory controller private instance data.
* @baseaddr: Base address of the DDR controller.
+ * @reglock: Concurrent CSRs access lock.
* @message: Buffer for framing the event specific info.
* @stat: ECC status information.
* @p_data: Platform data.
@@ -313,6 +315,7 @@ struct synps_ecc_status {
*/
struct synps_edac_priv {
void __iomem *baseaddr;
+ spinlock_t reglock;
char message[SYNPS_EDAC_MSG_SIZE];
struct synps_ecc_status stat;
const struct synps_platform_data *p_data;
@@ -408,7 +411,8 @@ out:
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
struct synps_ecc_status *p;
- u32 regval, clearval = 0;
+ u32 regval, clearval;
+ unsigned long flags;
void __iomem *base;
base = priv->baseaddr;
@@ -452,10 +456,14 @@ ue_err:
p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
- clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
- clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ clearval = readl(base + ECC_CLR_OFST) |
+ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
+ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
writel(clearval, base + ECC_CLR_OFST);
- writel(0x0, base + ECC_CLR_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
return 0;
}
@@ -515,24 +523,41 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
static void enable_intr(struct synps_edac_priv *priv)
{
+ unsigned long flags;
+
/* Enable UE/CE Interrupts */
- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
- writel(DDR_UE_MASK | DDR_CE_MASK,
- priv->baseaddr + ECC_CLR_OFST);
- else
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+ return;
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ writel(DDR_UE_MASK | DDR_CE_MASK,
+ priv->baseaddr + ECC_CLR_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
}
static void disable_intr(struct synps_edac_priv *priv)
{
+ unsigned long flags;
+
/* Disable UE/CE Interrupts */
- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
- writel(0x0, priv->baseaddr + ECC_CLR_OFST);
- else
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
+
+ return;
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ writel(0, priv->baseaddr + ECC_CLR_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
}
/**
@@ -576,8 +601,6 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
/* v3.0 of the controller does not have this register */
if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
- else
- enable_intr(priv);
return IRQ_HANDLED;
}
@@ -1357,6 +1380,7 @@ static int mc_probe(struct platform_device *pdev)
priv = mci->pvt_info;
priv->baseaddr = baseaddr;
priv->p_data = p_data;
+ spin_lock_init(&priv->reglock);
mc_init(mci, pdev);
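
Editor's note: ECC_CLR_OFST is now written from both the error-clearing path and the interrupt enable/disable paths, so the new reglock makes each read-modify-write atomic. A pthread sketch of the lost update it prevents (bit values illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t reglock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t ecc_clr;

static void *set_bits(void *arg)
{
	pthread_mutex_lock(&reglock);
	ecc_clr |= (uint32_t)(uintptr_t)arg;	/* readl ... writel under the lock */
	pthread_mutex_unlock(&reglock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, set_bits, (void *)0x3);  /* CE clear bits */
	pthread_create(&b, NULL, set_bits, (void *)0xc);  /* UE clear bits */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("ecc_clr=%#x\n", ecc_clr);	/* always 0xf with the lock */
	return 0;
}
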
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 90d46e5c4ff0..fab9891e569a 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1365,8 +1365,7 @@ static int thunderx_ocx_probe(struct pci_dev *pdev,
idx = edac_device_alloc_index();
snprintf(name, sizeof(name), "OCX%d", idx);
edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_ocx),
- name, 1, "CCPI", 1,
- 0, NULL, 0, idx);
+ name, 1, "CCPI", 1, 0, idx);
if (!edac_dev) {
dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
return -ENOMEM;
@@ -2004,8 +2003,7 @@ static int thunderx_l2c_probe(struct pci_dev *pdev,
snprintf(name, sizeof(name), fmt, idx);
edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_l2c),
- name, 1, "L2C", 1, 0,
- NULL, 0, idx);
+ name, 1, "L2C", 1, 0, idx);
if (!edac_dev) {
dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
return -ENOMEM;
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index 1688a5050f63..a556d23e8261 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -425,7 +425,7 @@ static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat)
convert_to_physical(priv, pinf), pinf.burstpos);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
- priv->ce_cnt, 0, 0, 0, 0, 0, -1,
+ 1, 0, 0, 0, 0, 0, -1,
priv->message, "");
}
@@ -438,7 +438,7 @@ static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat)
convert_to_physical(priv, pinf), pinf.burstpos);
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
- priv->ue_cnt, 0, 0, 0, 0, 0, -1,
+ 1, 0, 0, 0, 0, 0, -1,
priv->message, "");
}
@@ -865,6 +865,9 @@ static ssize_t inject_data_ue_store(struct file *file, const char __user *data,
for (i = 0; i < NUM_UE_BITPOS; i++)
token[i] = strsep(&pbuf, ",");
+ if (!token[0] || !token[1])
+ return -EFAULT;
+
ret = kstrtou8(token[0], 0, &ue0);
if (ret)
return ret;
@@ -1135,8 +1138,7 @@ static int mc_probe(struct platform_device *pdev)
}
rc = xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
- XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR |
- XPM_EVENT_ERROR_MASK_NOC_CR | XPM_EVENT_ERROR_MASK_NOC_NCR,
+ XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR,
false, err_callback, mci);
if (rc) {
if (rc == -EACCES)
@@ -1173,8 +1175,6 @@ static void mc_remove(struct platform_device *pdev)
xlnx_unregister_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
XPM_EVENT_ERROR_MASK_DDRMC_CR |
- XPM_EVENT_ERROR_MASK_NOC_CR |
- XPM_EVENT_ERROR_MASK_NOC_NCR |
XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 1b50f8160013..fd87f1b2c145 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -913,8 +913,8 @@ static int xgene_edac_pmd_add(struct xgene_edac *edac, struct device_node *np,
snprintf(edac_name, sizeof(edac_name), "l2c%d", pmd);
edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
- edac_name, 1, "l2c", 1, 2, NULL,
- 0, edac_device_alloc_index());
+ edac_name, 1, "l2c", 1, 2,
+ edac_device_alloc_index());
if (!edac_dev) {
rc = -ENOMEM;
goto err_group;
@@ -1208,8 +1208,7 @@ static int xgene_edac_l3_add(struct xgene_edac *edac, struct device_node *np,
edac_idx = edac_device_alloc_index();
edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
- "l3c", 1, "l3c", 1, 0, NULL, 0,
- edac_idx);
+ "l3c", 1, "l3c", 1, 0, edac_idx);
if (!edac_dev) {
rc = -ENOMEM;
goto err_release_group;
@@ -1748,8 +1747,7 @@ static int xgene_edac_soc_add(struct xgene_edac *edac, struct device_node *np,
edac_idx = edac_device_alloc_index();
edac_dev = edac_device_alloc_ctl_info(sizeof(*ctx),
- "SOC", 1, "SOC", 1, 2, NULL, 0,
- edac_idx);
+ "SOC", 1, "SOC", 1, 2, edac_idx);
if (!edac_dev) {
rc = -ENOMEM;
goto err_release_group;
diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c
index 2d9a5cfd8931..c9dc78d8c824 100644
--- a/drivers/edac/zynqmp_edac.c
+++ b/drivers/edac/zynqmp_edac.c
@@ -381,7 +381,7 @@ static int edac_probe(struct platform_device *pdev)
}
dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING,
- 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0,
+ 1, ZYNQMP_OCM_EDAC_STRING, 1, 0,
edac_device_alloc_index());
if (!dci)
return -ENOMEM;
diff --git a/drivers/eisa/Kconfig b/drivers/eisa/Kconfig
index c8bbf90209f5..a66b3be502a9 100644
--- a/drivers/eisa/Kconfig
+++ b/drivers/eisa/Kconfig
@@ -44,17 +44,16 @@ config EISA_PCI_EISA
When in doubt, say Y.
-# Using EISA_VIRTUAL_ROOT on something other than an Alpha or
-# an X86 may lead to crashes...
+# Using EISA_VIRTUAL_ROOT on something other than an X86 may lead
+# to crashes...
config EISA_VIRTUAL_ROOT
bool "EISA virtual root device"
- depends on EISA && (ALPHA || X86)
+ depends on EISA && X86
default y
help
Activate this option if your system only has an EISA bus
- (no PCI slots). The Alpha Jensen is an example of such
- a system.
+ (no PCI slots).
When in doubt, say Y.
diff --git a/drivers/eisa/virtual_root.c b/drivers/eisa/virtual_root.c
index 37e6dd219c37..cd9515d9d8f0 100644
--- a/drivers/eisa/virtual_root.c
+++ b/drivers/eisa/virtual_root.c
@@ -13,7 +13,7 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
-#if defined(CONFIG_ALPHA_JENSEN) || defined(CONFIG_EISA_VLB_PRIMING)
+#if defined(CONFIG_EISA_VLB_PRIMING)
#define EISA_FORCE_PROBE_DEFAULT 1
#else
#define EISA_FORCE_PROBE_DEFAULT 0
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index b0d671db178a..ea31ac7ac1ca 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -148,10 +148,12 @@ packet_buffer_get(struct client *client, char __user *data, size_t user_length)
if (atomic_read(&buffer->size) == 0)
return -ENODEV;
- /* FIXME: Check length <= user_length. */
+ length = buffer->head->length;
+
+ if (length > user_length)
+ return 0;
end = buffer->data + buffer->capacity;
- length = buffer->head->length;
if (&buffer->head->data[length] < end) {
if (copy_to_user(data, buffer->head->data, length))
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 38d19410a2be..b9ae0340b8a7 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1556,6 +1556,8 @@ static int handle_at_packet(struct context *context,
#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
+static u32 get_cycle_time(struct fw_ohci *ohci);
+
static void handle_local_rom(struct fw_ohci *ohci,
struct fw_packet *packet, u32 csr)
{
@@ -1580,6 +1582,8 @@ static void handle_local_rom(struct fw_ohci *ohci,
(void *) ohci->config_rom + i, length);
}
+ // Timestamping on behalf of the hardware.
+ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
@@ -1628,6 +1632,8 @@ static void handle_local_lock(struct fw_ohci *ohci,
fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
out:
+ // Timestamping on behalf of the hardware.
+ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
@@ -1670,8 +1676,6 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
}
}
-static u32 get_cycle_time(struct fw_ohci *ohci);
-
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
unsigned long flags;
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index f2556a8e9401..1609247cfafc 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -101,11 +101,12 @@ struct ffa_drv_info {
bool bitmap_created;
bool notif_enabled;
unsigned int sched_recv_irq;
+ unsigned int notif_pend_irq;
unsigned int cpuhp_state;
struct ffa_pcpu_irq __percpu *irq_pcpu;
struct workqueue_struct *notif_pcpu_wq;
struct work_struct notif_pcpu_work;
- struct work_struct irq_work;
+ struct work_struct sched_recv_irq_work;
struct xarray partition_info;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
struct mutex notify_lock; /* lock to protect notifier hashtable */
@@ -344,6 +345,38 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
return -EINVAL;
}
+static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
+{
+ u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ struct ffa_indirect_msg_hdr *msg;
+ ffa_value_t ret;
+ int retval = 0;
+
+ if (sz > (RXTX_BUFFER_SIZE - sizeof(*msg)))
+ return -ERANGE;
+
+ mutex_lock(&drv_info->tx_lock);
+
+ msg = drv_info->tx_buffer;
+ msg->flags = 0;
+ msg->res0 = 0;
+ msg->offset = sizeof(*msg);
+ msg->send_recv_id = src_dst_ids;
+ msg->size = sz;
+ memcpy((u8 *)msg + msg->offset, buf, sz);
+
+ /* flags = 0, sender VMID = 0 works for both physical/virtual NS */
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ retval = ffa_to_linux_errno((int)ret.a2);
+
+ mutex_unlock(&drv_info->tx_lock);
+ return retval;
+}
+
static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
u32 frag_len, u32 len, u64 *handle)
{
@@ -790,7 +823,7 @@ static void ffa_notification_info_get(void)
part_id = packed_id_list[ids_processed++];
- if (!ids_count[list]) { /* Global Notification */
+ if (ids_count[list] == 1) { /* Global Notification */
__do_sched_recv_cb(part_id, 0, false);
continue;
}
@@ -870,6 +903,11 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
dev->mode_32bit, data);
}
+static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
+{
+ return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
+}
+
static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
if (drv_info->mem_ops_native)
@@ -1108,7 +1146,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
}
}
-static void notif_pcpu_irq_work_fn(struct work_struct *work)
+static void notif_get_and_handle(void *unused)
{
int rc;
struct ffa_notify_bitmaps bitmaps;
@@ -1131,10 +1169,17 @@ ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
struct ffa_drv_info *info = cb_data;
if (!is_per_vcpu)
- notif_pcpu_irq_work_fn(&info->notif_pcpu_work);
+ notif_get_and_handle(info);
else
- queue_work_on(vcpu, info->notif_pcpu_wq,
- &info->notif_pcpu_work);
+ smp_call_function_single(vcpu, notif_get_and_handle, info, 0);
+}
+
+static void notif_pcpu_irq_work_fn(struct work_struct *work)
+{
+ struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
+ notif_pcpu_work);
+
+ ffa_self_notif_handle(smp_processor_id(), true, info);
}
static const struct ffa_info_ops ffa_drv_info_ops = {
@@ -1145,6 +1190,7 @@ static const struct ffa_info_ops ffa_drv_info_ops = {
static const struct ffa_msg_ops ffa_drv_msg_ops = {
.mode_32bit_set = ffa_mode_32bit_set,
.sync_send_receive = ffa_sync_send_receive,
+ .indirect_send = ffa_indirect_msg_send,
};
static const struct ffa_mem_ops ffa_drv_mem_ops = {
@@ -1227,6 +1273,8 @@ static int ffa_setup_partitions(void)
continue;
}
+ ffa_dev->properties = tpbuf->properties;
+
if (drv_info->version > FFA_VERSION_1_0 &&
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
@@ -1291,12 +1339,23 @@ static void ffa_partitions_cleanup(void)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
#define FFA_FEAT_MANAGED_EXIT_INT (3)
-static irqreturn_t irq_handler(int irq, void *irq_data)
+static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data)
+{
+ struct ffa_pcpu_irq *pcpu = irq_data;
+ struct ffa_drv_info *info = pcpu->info;
+
+ queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
{
struct ffa_pcpu_irq *pcpu = irq_data;
struct ffa_drv_info *info = pcpu->info;
- queue_work(info->notif_pcpu_wq, &info->irq_work);
+ queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
+ &info->notif_pcpu_work);
return IRQ_HANDLED;
}
@@ -1306,15 +1365,23 @@ static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
ffa_notification_info_get();
}
-static int ffa_sched_recv_irq_map(void)
+static int ffa_irq_map(u32 id)
{
- int ret, irq, sr_intid;
+ char *err_str;
+ int ret, irq, intid;
- /* The returned sr_intid is assumed to be SGI donated to NS world */
- ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL);
+ if (id == FFA_FEAT_NOTIFICATION_PENDING_INT)
+ err_str = "Notification Pending Interrupt";
+ else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT)
+ err_str = "Schedule Receiver Interrupt";
+ else
+ err_str = "Unknown ID";
+
+ /* The returned intid is assumed to be SGI donated to NS world */
+ ret = ffa_features(id, 0, &intid, NULL);
if (ret < 0) {
if (ret != -EOPNOTSUPP)
- pr_err("Failed to retrieve scheduler Rx interrupt\n");
+ pr_err("Failed to retrieve FF-A %s %u\n", err_str, id);
return ret;
}
@@ -1329,12 +1396,12 @@ static int ffa_sched_recv_irq_map(void)
oirq.np = gic;
oirq.args_count = 1;
- oirq.args[0] = sr_intid;
+ oirq.args[0] = intid;
irq = irq_create_of_mapping(&oirq);
of_node_put(gic);
#ifdef CONFIG_ACPI
} else {
- irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE,
+ irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
ACPI_ACTIVE_HIGH);
#endif
}
@@ -1347,23 +1414,28 @@ static int ffa_sched_recv_irq_map(void)
return irq;
}
-static void ffa_sched_recv_irq_unmap(void)
+static void ffa_irq_unmap(unsigned int irq)
{
- if (drv_info->sched_recv_irq) {
- irq_dispose_mapping(drv_info->sched_recv_irq);
- drv_info->sched_recv_irq = 0;
- }
+ if (!irq)
+ return;
+ irq_dispose_mapping(irq);
}
static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
- enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
+ if (drv_info->sched_recv_irq)
+ enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
+ if (drv_info->notif_pend_irq)
+ enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
return 0;
}
static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
- disable_percpu_irq(drv_info->sched_recv_irq);
+ if (drv_info->sched_recv_irq)
+ disable_percpu_irq(drv_info->sched_recv_irq);
+ if (drv_info->notif_pend_irq)
+ disable_percpu_irq(drv_info->notif_pend_irq);
return 0;
}
@@ -1382,13 +1454,16 @@ static void ffa_uninit_pcpu_irq(void)
if (drv_info->sched_recv_irq)
free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);
+ if (drv_info->notif_pend_irq)
+ free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);
+
if (drv_info->irq_pcpu) {
free_percpu(drv_info->irq_pcpu);
drv_info->irq_pcpu = NULL;
}
}
-static int ffa_init_pcpu_irq(unsigned int irq)
+static int ffa_init_pcpu_irq(void)
{
struct ffa_pcpu_irq __percpu *irq_pcpu;
int ret, cpu;
@@ -1402,13 +1477,31 @@ static int ffa_init_pcpu_irq(unsigned int irq)
drv_info->irq_pcpu = irq_pcpu;
- ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu);
- if (ret) {
- pr_err("Error registering notification IRQ %d: %d\n", irq, ret);
- return ret;
+ if (drv_info->sched_recv_irq) {
+ ret = request_percpu_irq(drv_info->sched_recv_irq,
+ ffa_sched_recv_irq_handler,
+ "ARM-FFA-SRI", irq_pcpu);
+ if (ret) {
+ pr_err("Error registering percpu SRI nIRQ %d : %d\n",
+ drv_info->sched_recv_irq, ret);
+ drv_info->sched_recv_irq = 0;
+ return ret;
+ }
}
- INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn);
+ if (drv_info->notif_pend_irq) {
+ ret = request_percpu_irq(drv_info->notif_pend_irq,
+ notif_pend_irq_handler,
+ "ARM-FFA-NPI", irq_pcpu);
+ if (ret) {
+ pr_err("Error registering percpu NPI nIRQ %d : %d\n",
+ drv_info->notif_pend_irq, ret);
+ drv_info->notif_pend_irq = 0;
+ return ret;
+ }
+ }
+
+ INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
if (!drv_info->notif_pcpu_wq)
@@ -1428,7 +1521,10 @@ static int ffa_init_pcpu_irq(unsigned int irq)
static void ffa_notifications_cleanup(void)
{
ffa_uninit_pcpu_irq();
- ffa_sched_recv_irq_unmap();
+ ffa_irq_unmap(drv_info->sched_recv_irq);
+ drv_info->sched_recv_irq = 0;
+ ffa_irq_unmap(drv_info->notif_pend_irq);
+ drv_info->notif_pend_irq = 0;
if (drv_info->bitmap_created) {
ffa_notification_bitmap_destroy();
@@ -1439,30 +1535,31 @@ static void ffa_notifications_cleanup(void)
static void ffa_notifications_setup(void)
{
- int ret, irq;
+ int ret;
ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
- if (ret) {
- pr_info("Notifications not supported, continuing with it ..\n");
- return;
- }
+ if (!ret) {
+ ret = ffa_notification_bitmap_create();
+ if (ret) {
+ pr_err("Notification bitmap create error %d\n", ret);
+ return;
+ }
- ret = ffa_notification_bitmap_create();
- if (ret) {
- pr_info("Notification bitmap create error %d\n", ret);
- return;
+ drv_info->bitmap_created = true;
}
- drv_info->bitmap_created = true;
- irq = ffa_sched_recv_irq_map();
- if (irq <= 0) {
- ret = irq;
- goto cleanup;
- }
+ ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
+ if (ret > 0)
+ drv_info->sched_recv_irq = ret;
+
+ ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
+ if (ret > 0)
+ drv_info->notif_pend_irq = ret;
- drv_info->sched_recv_irq = irq;
+ if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
+ goto cleanup;
- ret = ffa_init_pcpu_irq(irq);
+ ret = ffa_init_pcpu_irq();
if (ret)
goto cleanup;
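
The FF-A changes above add indirect messaging (FFA_MSG_SEND2): the payload is staged behind an ffa_indirect_msg_hdr in the shared TX buffer, so anything larger than RXTX_BUFFER_SIZE minus the header is rejected with -ERANGE. A hedged sketch of a partition driver using the new op, assuming the usual ffa_dev->ops->msg_ops layout; the device and payload are illustrative only.

static int example_notify_partition(struct ffa_device *ffa_dev)
{
	char payload[] = "ping";

	/* Copies into the TX buffer under tx_lock, then invokes FFA_MSG_SEND2 */
	return ffa_dev->ops->msg_ops->indirect_send(ffa_dev, payload,
						    sizeof(payload));
}
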
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index a7bc4796519c..fd59f58ce8a2 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -10,7 +10,8 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
-scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
+scmi-protocols-y := base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
+scmi-protocols-y += pinctrl.o
scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y)
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 6affbfdd1dec..b5ac25dbc1ca 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -301,6 +301,17 @@ extern const struct scmi_desc scmi_optee_desc;
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
+enum scmi_bad_msg {
+ MSG_UNEXPECTED = -1,
+ MSG_INVALID = -2,
+ MSG_UNKNOWN = -3,
+ MSG_NOMEM = -4,
+ MSG_MBOX_SPURIOUS = -5,
+};
+
+void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
+ enum scmi_bad_msg err);
+
/* shmem related declarations */
struct scmi_shared_mem;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 2709598f3008..6b6957f4743f 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -33,6 +33,7 @@
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/xarray.h>
#include "common.h"
#include "notify.h"
@@ -44,8 +45,7 @@
static DEFINE_IDA(scmi_id);
-static DEFINE_IDR(scmi_protocols);
-static DEFINE_SPINLOCK(protocol_lock);
+static DEFINE_XARRAY(scmi_protocols);
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
@@ -194,11 +194,94 @@ struct scmi_info {
#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
-static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+static unsigned long
+scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id,
+ char *sub_vendor_id, u32 impl_ver)
{
- const struct scmi_protocol *proto;
+ char *signature, *p;
+ unsigned long hash = 0;
- proto = idr_find(&scmi_protocols, protocol_id);
+ /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
+ signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id,
+ vendor_id ?: "", sub_vendor_id ?: "", impl_ver);
+ if (!signature)
+ return 0;
+
+ p = signature;
+ while (*p)
+ hash = partial_name_hash(tolower(*p++), hash);
+ hash = end_name_hash(hash);
+
+ kfree(signature);
+
+ return hash;
+}
+
+static unsigned long
+scmi_protocol_key_calculate(int protocol_id, char *vendor_id,
+ char *sub_vendor_id, u32 impl_ver)
+{
+ if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
+ return protocol_id;
+ else
+ return scmi_vendor_protocol_signature(protocol_id, vendor_id,
+ sub_vendor_id, impl_ver);
+}
+
+static const struct scmi_protocol *
+__scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
+ char *sub_vendor_id, u32 impl_ver)
+{
+ unsigned long key;
+ struct scmi_protocol *proto = NULL;
+
+ key = scmi_protocol_key_calculate(protocol_id, vendor_id,
+ sub_vendor_id, impl_ver);
+ if (key)
+ proto = xa_load(&scmi_protocols, key);
+
+ return proto;
+}
+
+static const struct scmi_protocol *
+scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
+ char *sub_vendor_id, u32 impl_ver)
+{
+ const struct scmi_protocol *proto = NULL;
+
+	/* Searching for the closest match first ... */
+ proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
+ sub_vendor_id, impl_ver);
+ if (proto)
+ return proto;
+
+ /* Any match just on vendor/sub_vendor ? */
+ if (impl_ver) {
+ proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
+ sub_vendor_id, 0);
+ if (proto)
+ return proto;
+ }
+
+ /* Any match just on the vendor ? */
+ if (sub_vendor_id)
+ proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
+ NULL, 0);
+ return proto;
+}
+
+static const struct scmi_protocol *
+scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
+{
+ const struct scmi_protocol *proto = NULL;
+
+ if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
+ proto = xa_load(&scmi_protocols, protocol_id);
+ else
+ proto = scmi_vendor_protocol_lookup(protocol_id,
+ version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
if (!proto || !try_module_get(proto->owner)) {
pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
return NULL;
@@ -206,21 +289,46 @@ static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+ if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE)
+ pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
+ protocol_id, proto->vendor_id ?: "",
+ proto->sub_vendor_id ?: "", proto->impl_ver);
+
return proto;
}
-static void scmi_protocol_put(int protocol_id)
+static void scmi_protocol_put(const struct scmi_protocol *proto)
{
- const struct scmi_protocol *proto;
-
- proto = idr_find(&scmi_protocols, protocol_id);
if (proto)
module_put(proto->owner);
}
+static int scmi_vendor_protocol_check(const struct scmi_protocol *proto)
+{
+ if (!proto->vendor_id) {
+ pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
+ pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ if (proto->sub_vendor_id &&
+ strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
+ pr_err("malformed sub_vendor_id for protocol 0x%x\n",
+ proto->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int scmi_protocol_register(const struct scmi_protocol *proto)
{
int ret;
+ unsigned long key;
if (!proto) {
pr_err("invalid protocol\n");
@@ -232,12 +340,23 @@ int scmi_protocol_register(const struct scmi_protocol *proto)
return -EINVAL;
}
- spin_lock(&protocol_lock);
- ret = idr_alloc(&scmi_protocols, (void *)proto,
- proto->id, proto->id + 1, GFP_ATOMIC);
- spin_unlock(&protocol_lock);
- if (ret != proto->id) {
- pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
+ scmi_vendor_protocol_check(proto))
+ return -EINVAL;
+
+ /*
+ * Calculate a protocol key to register this protocol with the core;
+ * key value 0 is considered invalid.
+ */
+ key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
+ proto->sub_vendor_id,
+ proto->impl_ver);
+ if (!key)
+ return -EINVAL;
+
+ ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL);
+ if (ret) {
+ pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
proto->id, ret);
return ret;
}
@@ -250,9 +369,15 @@ EXPORT_SYMBOL_GPL(scmi_protocol_register);
void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
- spin_lock(&protocol_lock);
- idr_remove(&scmi_protocols, proto->id);
- spin_unlock(&protocol_lock);
+ unsigned long key;
+
+ key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
+ proto->sub_vendor_id,
+ proto->impl_ver);
+ if (!key)
+ return;
+
+ xa_erase(&scmi_protocols, key);
pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
}
@@ -697,6 +822,45 @@ scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
}
/**
+ * scmi_bad_message_trace - A helper to trace weird messages
+ *
+ * @cinfo: A reference to the channel descriptor on which the message was
+ * received
+ * @msg_hdr: Message header to track
+ * @err: A specific error code used as a status value in traces.
+ *
+ * This helper can be used to trace any kind of weird, incomplete, unexpected
+ * or timed-out message that arrives; such messages can be traced only by
+ * referring to the header content, since the payload is missing/unreliable.
+ */
+void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
+ enum scmi_bad_msg err)
+{
+ char *tag;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+
+ switch (MSG_XTRACT_TYPE(msg_hdr)) {
+ case MSG_TYPE_COMMAND:
+ tag = "!RESP";
+ break;
+ case MSG_TYPE_DELAYED_RESP:
+ tag = "!DLYD";
+ break;
+ case MSG_TYPE_NOTIFICATION:
+ tag = "!NOTI";
+ break;
+ default:
+ tag = "!UNKN";
+ break;
+ }
+
+ trace_scmi_msg_dump(info->id, cinfo->id,
+ MSG_XTRACT_PROT_ID(msg_hdr),
+ MSG_XTRACT_ID(msg_hdr), tag,
+ MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0);
+}
+
+/**
* scmi_msg_response_validate - Validate message type against state of related
* xfer
*
@@ -822,6 +986,9 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
"Message for %d type %d is not expected!\n",
xfer_id, msg_type);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
+
return xfer;
}
refcount_inc(&xfer->users);
@@ -846,6 +1013,9 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
dev_err(cinfo->dev,
"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
msg_type, xfer_id, msg_hdr, xfer->state);
+
+ scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
+
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
xfer = ERR_PTR(-EINVAL);
@@ -882,6 +1052,9 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
if (IS_ERR(xfer)) {
dev_err(dev, "failed to get free message slot (%ld)\n",
PTR_ERR(xfer));
+
+ scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
+
scmi_clear_channel(info, cinfo);
return;
}
@@ -1001,6 +1174,7 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
break;
default:
WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+ scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN);
break;
}
}
@@ -1489,6 +1663,20 @@ out:
}
/**
+ * scmi_common_get_max_msg_size - Get maximum message size
+ * @ph: A protocol handle reference.
+ *
+ * Return: Maximum message size for the current protocol.
+ */
+static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
+ return info->desc->max_msg_size;
+}
+
+/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
@@ -1799,6 +1987,7 @@ static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
+ .get_max_msg_size = scmi_common_get_max_msg_size,
.iter_response_init = scmi_iterator_init,
.iter_response_run = scmi_iterator_run,
.protocol_msg_check = scmi_protocol_msg_check,
@@ -1891,7 +2080,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
/* Protocol specific devres group */
gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
if (!gid) {
- scmi_protocol_put(proto->id);
+ scmi_protocol_put(proto);
goto out;
}
@@ -1955,7 +2144,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
clean:
/* Take care to put the protocol module's owner before releasing all */
- scmi_protocol_put(proto->id);
+ scmi_protocol_put(proto);
devres_release_group(handle->dev, gid);
out:
return ERR_PTR(ret);
@@ -1989,7 +2178,7 @@ scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
const struct scmi_protocol *proto;
/* Fails if protocol not registered on bus */
- proto = scmi_protocol_get(protocol_id);
+ proto = scmi_protocol_get(protocol_id, &info->version);
if (proto)
pi = scmi_alloc_init_protocol_instance(info, proto);
else
@@ -2044,7 +2233,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
idr_remove(&info->protocols, protocol_id);
- scmi_protocol_put(protocol_id);
+ scmi_protocol_put(pi->proto);
devres_release_group(handle->dev, gid);
dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
@@ -2491,6 +2680,10 @@ scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
ret = 0;
}
+ if (ret)
+ dev_err(info->dev,
+ "failed to setup channel for protocol:0x%X\n", prot_id);
+
return ret;
}
@@ -2760,6 +2953,7 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
static int scmi_probe(struct platform_device *pdev)
{
int ret;
+ char *err_str = "probe failure\n";
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
@@ -2810,27 +3004,37 @@ static int scmi_probe(struct platform_device *pdev)
if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev);
- if (ret)
+ if (ret) {
+ err_str = "transport not ready\n";
goto clear_ida;
+ }
}
/* Setup all channels described in the DT at first */
ret = scmi_channels_setup(info);
- if (ret)
+ if (ret) {
+ err_str = "failed to setup channels\n";
goto clear_ida;
+ }
ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
- if (ret)
+ if (ret) {
+ err_str = "failed to register bus notifier\n";
goto clear_txrx_setup;
+ }
ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
&info->dev_req_nb);
- if (ret)
+ if (ret) {
+ err_str = "failed to register device notifier\n";
goto clear_bus_notifier;
+ }
ret = scmi_xfer_info_init(info);
- if (ret)
+ if (ret) {
+ err_str = "failed to init xfers pool\n";
goto clear_dev_req_notifier;
+ }
if (scmi_top_dentry) {
info->dbg = scmi_debugfs_common_setup(info);
@@ -2867,9 +3071,11 @@ static int scmi_probe(struct platform_device *pdev)
*/
ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
- dev_err(dev, "unable to communicate with SCMI\n");
- if (coex)
+ err_str = "unable to communicate with SCMI\n";
+ if (coex) {
+ dev_err(dev, "%s", err_str);
return 0;
+ }
goto notification_exit;
}
@@ -2923,7 +3129,8 @@ clear_txrx_setup:
scmi_cleanup_txrx_channels(info);
clear_ida:
ida_free(&scmi_id, info->id);
- return ret;
+
+ return dev_err_probe(dev, ret, "%s", err_str);
}
static void scmi_remove(struct platform_device *pdev)
@@ -3127,6 +3334,7 @@ static int __init scmi_driver_init(void)
scmi_voltage_register();
scmi_system_register();
scmi_powercap_register();
+ scmi_pinctrl_register();
return platform_driver_register(&scmi_driver);
}
@@ -3144,6 +3352,7 @@ static void __exit scmi_driver_exit(void)
scmi_voltage_unregister();
scmi_system_unregister();
scmi_powercap_unregister();
+ scmi_pinctrl_unregister();
scmi_transports_exit();
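
Replacing the IDR and its spinlock with an XArray is the central data-structure change in driver.c above: xa_insert() returns -EBUSY for an occupied key, reproducing the duplicate detection the old idr_alloc(proto->id, proto->id + 1) range provided, and the XArray handles its own locking. A reduced sketch of the pattern, independent of SCMI:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_protocols);

/* xa_insert() fails with -EBUSY if the key is already in use */
static int example_register(unsigned long key, void *proto)
{
	return xa_insert(&example_protocols, key, proto, GFP_KERNEL);
}

/* RCU-safe lookup of whatever was registered under the key */
static void *example_lookup(unsigned long key)
{
	return xa_load(&example_protocols, key);
}
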
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index b8d470417e8f..615a3b2ad83d 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -56,6 +56,9 @@ static void rx_callback(struct mbox_client *cl, void *m)
*/
if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
+ scmi_bad_message_trace(smbox->cinfo,
+ shmem_read_header(smbox->shmem),
+ MSG_MBOX_SPURIOUS);
return;
}
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index 27c52531194d..e160ecb22948 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1513,17 +1513,12 @@ static int scmi_devm_notifier_register(struct scmi_device *sdev,
static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
{
struct scmi_notifier_devres *dres = res;
- struct scmi_notifier_devres *xres = data;
+ struct notifier_block *nb = data;
- if (WARN_ON(!dres || !xres))
+ if (WARN_ON(!dres || !nb))
return 0;
- return dres->proto_id == xres->proto_id &&
- dres->evt_id == xres->evt_id &&
- dres->nb == xres->nb &&
- ((!dres->src_id && !xres->src_id) ||
- (dres->src_id && xres->src_id &&
- dres->__src_id == xres->__src_id));
+ return dres->nb == nb;
}
/**
@@ -1531,10 +1526,6 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
* notifier_block for an event
* @sdev: A reference to an scmi_device whose embedded struct device is to
* be used for devres accounting.
- * @proto_id: Protocol ID
- * @evt_id: Event ID
- * @src_id: Source ID, when NULL register for events coming form ALL possible
- * sources
* @nb: A standard notifier block to register for the specified event
*
* Generic devres managed helper to explicitly un-register a notifier_block
@@ -1544,25 +1535,12 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
* Return: 0 on Success
*/
static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
- u8 proto_id, u8 evt_id,
- const u32 *src_id,
struct notifier_block *nb)
{
int ret;
- struct scmi_notifier_devres dres;
-
- dres.handle = sdev->handle;
- dres.proto_id = proto_id;
- dres.evt_id = evt_id;
- if (src_id) {
- dres.__src_id = *src_id;
- dres.src_id = &dres.__src_id;
- } else {
- dres.src_id = NULL;
- }
ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
- scmi_devm_notifier_match, &dres);
+ scmi_devm_notifier_match, nb);
WARN_ON(ret);
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 345fff167b52..4b7f1cbb9b04 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -387,8 +387,8 @@ process_response_opp(struct device *dev, struct perf_dom_info *dom,
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n",
- opp->perf, ret);
+ dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
}
static inline void
@@ -405,8 +405,8 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n",
- opp->perf, ret);
+ dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ opp->perf, dom->info.name, ret);
/* Note that PERF v4 always reports five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
@@ -417,8 +417,8 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
GFP_KERNEL);
if (ret)
dev_warn(dev,
- "Failed to add opps_by_idx at %d - ret:%d\n",
- opp->level_index, ret);
+ "Failed to add opps_by_idx at %d for %s - ret:%d\n",
+ opp->level_index, dom->info.name, ret);
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
@@ -879,7 +879,8 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
ret = dev_pm_opp_add_dynamic(dev, &data);
if (ret) {
- dev_warn(dev, "failed to add opp %luHz\n", freq);
+ dev_warn(dev, "[%d][%s]: Failed to add OPP[%d] %lu\n",
+ domain, dom->info.name, idx, freq);
dev_pm_opp_remove_all_dynamic(dev);
return ret;
}
diff --git a/drivers/firmware/arm_scmi/pinctrl.c b/drivers/firmware/arm_scmi/pinctrl.c
new file mode 100644
index 000000000000..a2a7f880d6a3
--- /dev/null
+++ b/drivers/firmware/arm_scmi/pinctrl.c
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Pinctrl Protocol
+ *
+ * Copyright (C) 2024 EPAM
+ * Copyright 2024 NXP
+ */
+
+#include <asm/byteorder.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "common.h"
+#include "protocols.h"
+
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+#define GET_GROUPS_NR(x) le32_get_bits((x), GENMASK(31, 16))
+#define GET_PINS_NR(x) le32_get_bits((x), GENMASK(15, 0))
+#define GET_FUNCTIONS_NR(x) le32_get_bits((x), GENMASK(15, 0))
+
+#define EXT_NAME_FLAG(x) le32_get_bits((x), BIT(31))
+#define NUM_ELEMS(x) le32_get_bits((x), GENMASK(15, 0))
+
+#define REMAINING(x) le32_get_bits((x), GENMASK(31, 16))
+#define RETURNED(x) le32_get_bits((x), GENMASK(11, 0))
+
+#define CONFIG_FLAG_MASK GENMASK(19, 18)
+#define SELECTOR_MASK GENMASK(17, 16)
+#define SKIP_CONFIGS_MASK GENMASK(15, 8)
+#define CONFIG_TYPE_MASK GENMASK(7, 0)
+
+enum scmi_pinctrl_protocol_cmd {
+ PINCTRL_ATTRIBUTES = 0x3,
+ PINCTRL_LIST_ASSOCIATIONS = 0x4,
+ PINCTRL_SETTINGS_GET = 0x5,
+ PINCTRL_SETTINGS_CONFIGURE = 0x6,
+ PINCTRL_REQUEST = 0x7,
+ PINCTRL_RELEASE = 0x8,
+ PINCTRL_NAME_GET = 0x9,
+ PINCTRL_SET_PERMISSIONS = 0xa,
+};
+
+struct scmi_msg_settings_conf {
+ __le32 identifier;
+ __le32 function_id;
+ __le32 attributes;
+ __le32 configs[];
+};
+
+struct scmi_msg_settings_get {
+ __le32 identifier;
+ __le32 attributes;
+};
+
+struct scmi_resp_settings_get {
+ __le32 function_selected;
+ __le32 num_configs;
+ __le32 configs[];
+};
+
+struct scmi_msg_pinctrl_protocol_attributes {
+ __le32 attributes_low;
+ __le32 attributes_high;
+};
+
+struct scmi_msg_pinctrl_attributes {
+ __le32 identifier;
+ __le32 flags;
+};
+
+struct scmi_resp_pinctrl_attributes {
+ __le32 attributes;
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+};
+
+struct scmi_msg_pinctrl_list_assoc {
+ __le32 identifier;
+ __le32 flags;
+ __le32 index;
+};
+
+struct scmi_resp_pinctrl_list_assoc {
+ __le32 flags;
+ __le16 array[];
+};
+
+struct scmi_msg_request {
+ __le32 identifier;
+ __le32 flags;
+};
+
+struct scmi_group_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool present;
+ u32 *group_pins;
+ u32 nr_pins;
+};
+
+struct scmi_function_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool present;
+ u32 *groups;
+ u32 nr_groups;
+};
+
+struct scmi_pin_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool present;
+};
+
+struct scmi_pinctrl_info {
+ u32 version;
+ int nr_groups;
+ int nr_functions;
+ int nr_pins;
+ struct scmi_group_info *groups;
+ struct scmi_function_info *functions;
+ struct scmi_pin_info *pins;
+};
+
+static int scmi_pinctrl_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_pinctrl_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_pinctrl_protocol_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ pi->nr_functions = GET_FUNCTIONS_NR(attr->attributes_high);
+ pi->nr_groups = GET_GROUPS_NR(attr->attributes_low);
+ pi->nr_pins = GET_PINS_NR(attr->attributes_low);
+ if (pi->nr_pins == 0) {
+ dev_warn(ph->dev, "returned zero pins\n");
+ ret = -EINVAL;
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_pinctrl_count_get(const struct scmi_protocol_handle *ph,
+ enum scmi_pinctrl_selector_type type)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ switch (type) {
+ case PIN_TYPE:
+ return pi->nr_pins;
+ case GROUP_TYPE:
+ return pi->nr_groups;
+ case FUNCTION_TYPE:
+ return pi->nr_functions;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int scmi_pinctrl_validate_id(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type)
+{
+ int value;
+
+ value = scmi_pinctrl_count_get(ph, type);
+ if (value < 0)
+ return value;
+
+ if (selector >= value || value == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_pinctrl_attributes(const struct scmi_protocol_handle *ph,
+ enum scmi_pinctrl_selector_type type,
+ u32 selector, char *name,
+ u32 *n_elems)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_pinctrl_attributes *tx;
+ struct scmi_resp_pinctrl_attributes *rx;
+ bool ext_name_flag;
+
+ if (!name)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, selector, type);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, PINCTRL_ATTRIBUTES, sizeof(*tx),
+ sizeof(*rx), &t);
+ if (ret)
+ return ret;
+
+ tx = t->tx.buf;
+ rx = t->rx.buf;
+ tx->identifier = cpu_to_le32(selector);
+ tx->flags = cpu_to_le32(type);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ if (n_elems)
+ *n_elems = NUM_ELEMS(rx->attributes);
+
+ strscpy(name, rx->name, SCMI_SHORT_NAME_MAX_SIZE);
+
+ ext_name_flag = !!EXT_NAME_FLAG(rx->attributes);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ if (ret)
+ return ret;
+ /*
+	 * If supported, overwrite the short name with the extended one;
+	 * on error just carry on and use the already provided short name.
+ */
+ if (ext_name_flag)
+ ret = ph->hops->extended_name_get(ph, PINCTRL_NAME_GET,
+ selector, (u32 *)&type, name,
+ SCMI_MAX_STR_SIZE);
+ return ret;
+}
+
+struct scmi_pinctrl_ipriv {
+ u32 selector;
+ enum scmi_pinctrl_selector_type type;
+ u32 *array;
+};
+
+static void iter_pinctrl_assoc_prepare_message(void *message,
+ u32 desc_index,
+ const void *priv)
+{
+ struct scmi_msg_pinctrl_list_assoc *msg = message;
+ const struct scmi_pinctrl_ipriv *p = priv;
+
+ msg->identifier = cpu_to_le32(p->selector);
+ msg->flags = cpu_to_le32(p->type);
+ msg->index = cpu_to_le32(desc_index);
+}
+
+static int iter_pinctrl_assoc_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_resp_pinctrl_list_assoc *r = response;
+
+ st->num_returned = RETURNED(r->flags);
+ st->num_remaining = REMAINING(r->flags);
+
+ return 0;
+}
+
+static int
+iter_pinctrl_assoc_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ const struct scmi_resp_pinctrl_list_assoc *r = response;
+ struct scmi_pinctrl_ipriv *p = priv;
+
+ p->array[st->desc_index + st->loop_idx] =
+ le16_to_cpu(r->array[st->loop_idx]);
+
+ return 0;
+}
+
+static int scmi_pinctrl_list_associations(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ u16 size, u32 *array)
+{
+ int ret;
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_pinctrl_assoc_prepare_message,
+ .update_state = iter_pinctrl_assoc_update_state,
+ .process_response = iter_pinctrl_assoc_process_response,
+ };
+ struct scmi_pinctrl_ipriv ipriv = {
+ .selector = selector,
+ .type = type,
+ .array = array,
+ };
+
+ if (!array || !size || type == PIN_TYPE)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, selector, type);
+ if (ret)
+ return ret;
+
+ iter = ph->hops->iter_response_init(ph, &ops, size,
+ PINCTRL_LIST_ASSOCIATIONS,
+ sizeof(struct scmi_msg_pinctrl_list_assoc),
+ &ipriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return ph->hops->iter_response_run(iter);
+}
+
+struct scmi_settings_get_ipriv {
+ u32 selector;
+ enum scmi_pinctrl_selector_type type;
+ bool get_all;
+ unsigned int *nr_configs;
+ enum scmi_pinctrl_conf_type *config_types;
+ u32 *config_values;
+};
+
+static void
+iter_pinctrl_settings_get_prepare_message(void *message, u32 desc_index,
+ const void *priv)
+{
+ struct scmi_msg_settings_get *msg = message;
+ const struct scmi_settings_get_ipriv *p = priv;
+ u32 attributes;
+
+ attributes = FIELD_PREP(SELECTOR_MASK, p->type);
+
+ if (p->get_all) {
+ attributes |= FIELD_PREP(CONFIG_FLAG_MASK, 1) |
+ FIELD_PREP(SKIP_CONFIGS_MASK, desc_index);
+ } else {
+ attributes |= FIELD_PREP(CONFIG_TYPE_MASK, p->config_types[0]);
+ }
+
+ msg->attributes = cpu_to_le32(attributes);
+ msg->identifier = cpu_to_le32(p->selector);
+}
+
+static int
+iter_pinctrl_settings_get_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_resp_settings_get *r = response;
+ struct scmi_settings_get_ipriv *p = priv;
+
+ if (p->get_all) {
+ st->num_returned = le32_get_bits(r->num_configs, GENMASK(7, 0));
+ st->num_remaining = le32_get_bits(r->num_configs, GENMASK(31, 24));
+ } else {
+ st->num_returned = 1;
+ st->num_remaining = 0;
+ }
+
+ return 0;
+}
+
+static int
+iter_pinctrl_settings_get_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st,
+ void *priv)
+{
+ const struct scmi_resp_settings_get *r = response;
+ struct scmi_settings_get_ipriv *p = priv;
+ u32 type = le32_get_bits(r->configs[st->loop_idx * 2], GENMASK(7, 0));
+ u32 val = le32_to_cpu(r->configs[st->loop_idx * 2 + 1]);
+
+ if (p->get_all) {
+ p->config_types[st->desc_index + st->loop_idx] = type;
+ } else {
+ if (p->config_types[0] != type)
+ return -EINVAL;
+ }
+
+ p->config_values[st->desc_index + st->loop_idx] = val;
+ ++*p->nr_configs;
+
+ return 0;
+}
+
+static int
+scmi_pinctrl_settings_get(const struct scmi_protocol_handle *ph, u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ unsigned int *nr_configs,
+ enum scmi_pinctrl_conf_type *config_types,
+ u32 *config_values)
+{
+ int ret;
+ void *iter;
+ unsigned int max_configs = *nr_configs;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_pinctrl_settings_get_prepare_message,
+ .update_state = iter_pinctrl_settings_get_update_state,
+ .process_response = iter_pinctrl_settings_get_process_response,
+ };
+ struct scmi_settings_get_ipriv ipriv = {
+ .selector = selector,
+ .type = type,
+ .get_all = (max_configs > 1),
+ .nr_configs = nr_configs,
+ .config_types = config_types,
+ .config_values = config_values,
+ };
+
+ if (!config_types || !config_values || type == FUNCTION_TYPE)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, selector, type);
+ if (ret)
+ return ret;
+
+ /* Prepare to count returned configs */
+ *nr_configs = 0;
+ iter = ph->hops->iter_response_init(ph, &ops, max_configs,
+ PINCTRL_SETTINGS_GET,
+ sizeof(struct scmi_msg_settings_get),
+ &ipriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return ph->hops->iter_response_run(iter);
+}
+
+static int scmi_pinctrl_settings_get_one(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ enum scmi_pinctrl_conf_type config_type,
+ u32 *config_value)
+{
+ unsigned int nr_configs = 1;
+
+ return scmi_pinctrl_settings_get(ph, selector, type, &nr_configs,
+ &config_type, config_value);
+}
+
+static int scmi_pinctrl_settings_get_all(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ unsigned int *nr_configs,
+ enum scmi_pinctrl_conf_type *config_types,
+ u32 *config_values)
+{
+ if (!nr_configs || *nr_configs == 0)
+ return -EINVAL;
+
+ return scmi_pinctrl_settings_get(ph, selector, type, nr_configs,
+ config_types, config_values);
+}
+
+static int
+scmi_pinctrl_settings_conf(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ u32 nr_configs,
+ enum scmi_pinctrl_conf_type *config_type,
+ u32 *config_value)
+{
+ struct scmi_xfer *t;
+ struct scmi_msg_settings_conf *tx;
+ u32 attributes;
+ int ret, i;
+ u32 configs_in_chunk, conf_num = 0;
+ u32 chunk;
+ int max_msg_size = ph->hops->get_max_msg_size(ph);
+
+ if (!config_type || !config_value || type == FUNCTION_TYPE)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, selector, type);
+ if (ret)
+ return ret;
+
+ configs_in_chunk = (max_msg_size - sizeof(*tx)) / (sizeof(__le32) * 2);
+ while (conf_num < nr_configs) {
+ chunk = (nr_configs - conf_num > configs_in_chunk) ?
+ configs_in_chunk : nr_configs - conf_num;
+
+ ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE,
+ sizeof(*tx) +
+ chunk * 2 * sizeof(__le32), 0, &t);
+ if (ret)
+ break;
+
+ tx = t->tx.buf;
+ tx->identifier = cpu_to_le32(selector);
+ tx->function_id = cpu_to_le32(0xFFFFFFFF);
+ attributes = FIELD_PREP(GENMASK(1, 0), type) |
+ FIELD_PREP(GENMASK(9, 2), chunk);
+ tx->attributes = cpu_to_le32(attributes);
+
+ for (i = 0; i < chunk; i++) {
+ tx->configs[i * 2] =
+ cpu_to_le32(config_type[conf_num + i]);
+ tx->configs[i * 2 + 1] =
+ cpu_to_le32(config_value[conf_num + i]);
+ }
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ if (ret)
+ break;
+
+ conf_num += chunk;
+ }
+
+ return ret;
+}
+
+static int scmi_pinctrl_function_select(const struct scmi_protocol_handle *ph,
+ u32 group,
+ enum scmi_pinctrl_selector_type type,
+ u32 function_id)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_settings_conf *tx;
+ u32 attributes;
+
+ ret = scmi_pinctrl_validate_id(ph, group, type);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE,
+ sizeof(*tx), 0, &t);
+ if (ret)
+ return ret;
+
+ tx = t->tx.buf;
+ tx->identifier = cpu_to_le32(group);
+ tx->function_id = cpu_to_le32(function_id);
+ attributes = FIELD_PREP(GENMASK(1, 0), type) | BIT(10);
+ tx->attributes = cpu_to_le32(attributes);
+
+ ret = ph->xops->do_xfer(ph, t);
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_pinctrl_request_free(const struct scmi_protocol_handle *ph,
+ u32 identifier,
+ enum scmi_pinctrl_selector_type type,
+ enum scmi_pinctrl_protocol_cmd cmd)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_request *tx;
+
+ if (type == FUNCTION_TYPE)
+ return -EINVAL;
+
+ if (cmd != PINCTRL_REQUEST && cmd != PINCTRL_RELEASE)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_validate_id(ph, identifier, type);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, cmd, sizeof(*tx), 0, &t);
+ if (ret)
+ return ret;
+
+ tx = t->tx.buf;
+ tx->identifier = cpu_to_le32(identifier);
+ tx->flags = cpu_to_le32(type);
+
+ ret = ph->xops->do_xfer(ph, t);
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_pinctrl_pin_request(const struct scmi_protocol_handle *ph,
+ u32 pin)
+{
+ return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_REQUEST);
+}
+
+static int scmi_pinctrl_pin_free(const struct scmi_protocol_handle *ph, u32 pin)
+{
+ return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_RELEASE);
+}
+
+static int scmi_pinctrl_get_group_info(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ struct scmi_group_info *group)
+{
+ int ret;
+
+ ret = scmi_pinctrl_attributes(ph, GROUP_TYPE, selector, group->name,
+ &group->nr_pins);
+ if (ret)
+ return ret;
+
+ if (!group->nr_pins) {
+ dev_err(ph->dev, "Group %d has 0 elements", selector);
+ return -ENODATA;
+ }
+
+ group->group_pins = kmalloc_array(group->nr_pins,
+ sizeof(*group->group_pins),
+ GFP_KERNEL);
+ if (!group->group_pins)
+ return -ENOMEM;
+
+ ret = scmi_pinctrl_list_associations(ph, selector, GROUP_TYPE,
+ group->nr_pins, group->group_pins);
+ if (ret) {
+ kfree(group->group_pins);
+ return ret;
+ }
+
+ group->present = true;
+ return 0;
+}
+
+static int scmi_pinctrl_get_group_name(const struct scmi_protocol_handle *ph,
+ u32 selector, const char **name)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!name)
+ return -EINVAL;
+
+ if (selector >= pi->nr_groups || pi->nr_groups == 0)
+ return -EINVAL;
+
+ if (!pi->groups[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_group_info(ph, selector,
+ &pi->groups[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *name = pi->groups[selector].name;
+
+ return 0;
+}
+
+static int scmi_pinctrl_group_pins_get(const struct scmi_protocol_handle *ph,
+ u32 selector, const u32 **pins,
+ u32 *nr_pins)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!pins || !nr_pins)
+ return -EINVAL;
+
+ if (selector >= pi->nr_groups || pi->nr_groups == 0)
+ return -EINVAL;
+
+ if (!pi->groups[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_group_info(ph, selector,
+ &pi->groups[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *pins = pi->groups[selector].group_pins;
+ *nr_pins = pi->groups[selector].nr_pins;
+
+ return 0;
+}
+
+static int scmi_pinctrl_get_function_info(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ struct scmi_function_info *func)
+{
+ int ret;
+
+ ret = scmi_pinctrl_attributes(ph, FUNCTION_TYPE, selector, func->name,
+ &func->nr_groups);
+ if (ret)
+ return ret;
+
+ if (!func->nr_groups) {
+ dev_err(ph->dev, "Function %d has 0 elements", selector);
+ return -ENODATA;
+ }
+
+ func->groups = kmalloc_array(func->nr_groups, sizeof(*func->groups),
+ GFP_KERNEL);
+ if (!func->groups)
+ return -ENOMEM;
+
+ ret = scmi_pinctrl_list_associations(ph, selector, FUNCTION_TYPE,
+ func->nr_groups, func->groups);
+ if (ret) {
+ kfree(func->groups);
+ return ret;
+ }
+
+ func->present = true;
+ return 0;
+}
+
+static int scmi_pinctrl_get_function_name(const struct scmi_protocol_handle *ph,
+ u32 selector, const char **name)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!name)
+ return -EINVAL;
+
+ if (selector >= pi->nr_functions || pi->nr_functions == 0)
+ return -EINVAL;
+
+ if (!pi->functions[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_function_info(ph, selector,
+ &pi->functions[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *name = pi->functions[selector].name;
+ return 0;
+}
+
+static int
+scmi_pinctrl_function_groups_get(const struct scmi_protocol_handle *ph,
+ u32 selector, u32 *nr_groups,
+ const u32 **groups)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!groups || !nr_groups)
+ return -EINVAL;
+
+ if (selector >= pi->nr_functions || pi->nr_functions == 0)
+ return -EINVAL;
+
+ if (!pi->functions[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_function_info(ph, selector,
+ &pi->functions[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *groups = pi->functions[selector].groups;
+ *nr_groups = pi->functions[selector].nr_groups;
+
+ return 0;
+}
+
+static int scmi_pinctrl_mux_set(const struct scmi_protocol_handle *ph,
+ u32 selector, u32 group)
+{
+ return scmi_pinctrl_function_select(ph, group, GROUP_TYPE, selector);
+}
+
+static int scmi_pinctrl_get_pin_info(const struct scmi_protocol_handle *ph,
+ u32 selector, struct scmi_pin_info *pin)
+{
+ int ret;
+
+ if (!pin)
+ return -EINVAL;
+
+ ret = scmi_pinctrl_attributes(ph, PIN_TYPE, selector, pin->name, NULL);
+ if (ret)
+ return ret;
+
+ pin->present = true;
+ return 0;
+}
+
+static int scmi_pinctrl_get_pin_name(const struct scmi_protocol_handle *ph,
+ u32 selector, const char **name)
+{
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ if (!name)
+ return -EINVAL;
+
+ if (selector >= pi->nr_pins)
+ return -EINVAL;
+
+ if (!pi->pins[selector].present) {
+ int ret;
+
+ ret = scmi_pinctrl_get_pin_info(ph, selector, &pi->pins[selector]);
+ if (ret)
+ return ret;
+ }
+
+ *name = pi->pins[selector].name;
+
+ return 0;
+}
+
+static int scmi_pinctrl_name_get(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ const char **name)
+{
+ switch (type) {
+ case PIN_TYPE:
+ return scmi_pinctrl_get_pin_name(ph, selector, name);
+ case GROUP_TYPE:
+ return scmi_pinctrl_get_group_name(ph, selector, name);
+ case FUNCTION_TYPE:
+ return scmi_pinctrl_get_function_name(ph, selector, name);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct scmi_pinctrl_proto_ops pinctrl_proto_ops = {
+ .count_get = scmi_pinctrl_count_get,
+ .name_get = scmi_pinctrl_name_get,
+ .group_pins_get = scmi_pinctrl_group_pins_get,
+ .function_groups_get = scmi_pinctrl_function_groups_get,
+ .mux_set = scmi_pinctrl_mux_set,
+ .settings_get_one = scmi_pinctrl_settings_get_one,
+ .settings_get_all = scmi_pinctrl_settings_get_all,
+ .settings_conf = scmi_pinctrl_settings_conf,
+ .pin_request = scmi_pinctrl_pin_request,
+ .pin_free = scmi_pinctrl_pin_free,
+};
+
+static int scmi_pinctrl_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int ret;
+ u32 version;
+ struct scmi_pinctrl_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Pinctrl Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ ret = scmi_pinctrl_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->pins = devm_kcalloc(ph->dev, pinfo->nr_pins,
+ sizeof(*pinfo->pins), GFP_KERNEL);
+ if (!pinfo->pins)
+ return -ENOMEM;
+
+ pinfo->groups = devm_kcalloc(ph->dev, pinfo->nr_groups,
+ sizeof(*pinfo->groups), GFP_KERNEL);
+ if (!pinfo->groups)
+ return -ENOMEM;
+
+ pinfo->functions = devm_kcalloc(ph->dev, pinfo->nr_functions,
+ sizeof(*pinfo->functions), GFP_KERNEL);
+ if (!pinfo->functions)
+ return -ENOMEM;
+
+ pinfo->version = version;
+
+ return ph->set_priv(ph, pinfo, version);
+}
+
+static int scmi_pinctrl_protocol_deinit(const struct scmi_protocol_handle *ph)
+{
+ int i;
+ struct scmi_pinctrl_info *pi = ph->get_priv(ph);
+
+ /* Free groups_pins allocated in scmi_pinctrl_get_group_info */
+ for (i = 0; i < pi->nr_groups; i++) {
+ if (pi->groups[i].present) {
+ kfree(pi->groups[i].group_pins);
+ pi->groups[i].present = false;
+ }
+ }
+
+ /* Free groups allocated in scmi_pinctrl_get_function_info */
+ for (i = 0; i < pi->nr_functions; i++) {
+ if (pi->functions[i].present) {
+ kfree(pi->functions[i].groups);
+ pi->functions[i].present = false;
+ }
+ }
+
+ return 0;
+}
+
+static const struct scmi_protocol scmi_pinctrl = {
+ .id = SCMI_PROTOCOL_PINCTRL,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_pinctrl_protocol_init,
+ .instance_deinit = &scmi_pinctrl_protocol_deinit,
+ .ops = &pinctrl_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+};
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(pinctrl, scmi_pinctrl)
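
A hedged sketch of how a client driver might consume the new pinctrl protocol, mirroring the probe flow of other SCMI users; the surrounding driver and the pin index are invented for illustration.

static int example_probe(struct scmi_device *sdev)
{
	const struct scmi_pinctrl_proto_ops *ops;
	struct scmi_protocol_handle *ph;
	const char *name;
	int ret;

	ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PINCTRL,
					      &ph);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* Pin attributes are fetched lazily and cached on first use */
	ret = ops->name_get(ph, 0, PIN_TYPE, &name);
	if (!ret)
		dev_info(&sdev->dev, "pin 0 is named %s\n", name);

	return ret;
}
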
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
index ea9201e7044c..1fa79bba492e 100644
--- a/drivers/firmware/arm_scmi/powercap.c
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -736,7 +736,7 @@ static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_PAI_GET, 4, domain,
&fc[POWERCAP_FC_PAI].get_addr, NULL,
- &fc[POWERCAP_PAI_GET].rate_limit);
+ &fc[POWERCAP_FC_PAI].rate_limit);
*p_fc = fc;
}
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 317d3fb32676..8e95f53bd7b7 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -29,6 +29,8 @@
#define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))))
#define PROTOCOL_REV_MINOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x))))
+#define SCMI_PROTOCOL_VENDOR_BASE 0x80
+
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
@@ -258,6 +260,7 @@ struct scmi_fc_info {
* @fastchannel_init: A common helper used to initialize FC descriptors by
* gathering FC descriptions from the SCMI platform server.
* @fastchannel_db_ring: A common helper to ring a FC doorbell.
+ * @get_max_msg_size: A common helper to get the maximum message size.
*/
struct scmi_proto_helpers_ops {
int (*extended_name_get)(const struct scmi_protocol_handle *ph,
@@ -277,6 +280,7 @@ struct scmi_proto_helpers_ops {
struct scmi_fc_db_info **p_db,
u32 *rate_limit);
void (*fastchannel_db_ring)(struct scmi_fc_db_info *db);
+ int (*get_max_msg_size)(const struct scmi_protocol_handle *ph);
};
/**
@@ -323,6 +327,16 @@ typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
* protocol by the agent. Each protocol implementation
* in the agent is supposed to downgrade to match the
* protocol version supported by the platform.
+ * @vendor_id: A firmware vendor string for vendor protocols matching.
+ * Ignored when @id identifies a standard protocol, cannot be NULL
+ * otherwise.
+ * @sub_vendor_id: A firmware sub_vendor string for vendor protocols matching.
+ * Ignored if NULL or when @id identifies a standard protocol.
+ * @impl_ver: A firmware implementation version for vendor protocols matching.
+ * Ignored if zero or if @id identifies a standard protocol.
+ *
+ * Note that vendor protocol matching at load time is performed by attempting
+ * the closest match first against the tuple (vendor_id, sub_vendor_id, impl_ver).
*/
struct scmi_protocol {
const u8 id;
@@ -332,6 +346,9 @@ struct scmi_protocol {
const void *ops;
const struct scmi_protocol_events *events;
unsigned int supported_version;
+ char *vendor_id;
+ char *sub_vendor_id;
+ u32 impl_ver;
};
#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
@@ -353,6 +370,7 @@ void __exit scmi_##name##_unregister(void) \
DECLARE_SCMI_REGISTER_UNREGISTER(base);
DECLARE_SCMI_REGISTER_UNREGISTER(clock);
DECLARE_SCMI_REGISTER_UNREGISTER(perf);
+DECLARE_SCMI_REGISTER_UNREGISTER(pinctrl);
DECLARE_SCMI_REGISTER_UNREGISTER(power);
DECLARE_SCMI_REGISTER_UNREGISTER(reset);
DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
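
With the new vendor_id/sub_vendor_id/impl_ver fields, a vendor protocol module can describe which firmware it targets; the core then tries the full tuple first and falls back to vendor/sub_vendor and vendor-only matches. A hypothetical descriptor, with every identifier invented for illustration:

static const struct scmi_protocol scmi_acme = {
	.id = 0x81,			/* >= SCMI_PROTOCOL_VENDOR_BASE */
	.owner = THIS_MODULE,
	.instance_init = &scmi_acme_protocol_init,	/* hypothetical */
	.ops = &acme_proto_ops,				/* hypothetical */
	.supported_version = 0x10000,
	.vendor_id = "ACME",		/* mandatory for vendor protocols */
	.sub_vendor_id = "WIDGET",	/* optional, narrows the match */
	.impl_ver = 0x00010002,		/* optional, narrows it further */
};
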
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 350573518503..130d13e9cd6b 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
rd->raw = raw;
filp->private_data = rd;
- return 0;
+ return nonseekable_open(inode, filp);
}
static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
@@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.write = scmi_dbg_raw_mode_reset_write,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_async_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_notif_read,
.poll = scmi_test_dbg_raw_mode_notif_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_errors_read,
.poll = scmi_test_dbg_raw_mode_errors_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 5b439d04079c..50f6503fe49f 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -4,6 +4,7 @@
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
+#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>
/* Protects unaccepted memory bitmap and accepting_list */
@@ -149,6 +150,9 @@ retry:
}
list_del(&range.list);
+
+ touch_softlockup_watchdog();
+
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index c2bffdc352a3..6f810d720f4d 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -124,7 +124,6 @@ static struct coreboot_driver cbmem_entry_driver = {
.probe = cbmem_entry_probe,
.drv = {
.name = "cbmem",
- .owner = THIS_MODULE,
.dev_groups = dev_groups,
},
.id_table = cbmem_ids,
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index d4b6e581a6c6..fa7752f6e89b 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -85,13 +85,15 @@ static void coreboot_device_release(struct device *dev)
kfree(device);
}
-int coreboot_driver_register(struct coreboot_driver *driver)
+int __coreboot_driver_register(struct coreboot_driver *driver,
+ struct module *owner)
{
driver->drv.bus = &coreboot_bus_type;
+ driver->drv.owner = owner;
return driver_register(&driver->drv);
}
-EXPORT_SYMBOL(coreboot_driver_register);
+EXPORT_SYMBOL(__coreboot_driver_register);
void coreboot_driver_unregister(struct coreboot_driver *driver)
{
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 86427989c57f..bb6f0f7299b4 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -97,8 +97,12 @@ struct coreboot_driver {
const struct coreboot_device_id *id_table;
};
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define coreboot_driver_register(driver) \
+ __coreboot_driver_register(driver, THIS_MODULE)
/* Register a driver that uses the data from a coreboot table. */
-int coreboot_driver_register(struct coreboot_driver *driver);
+int __coreboot_driver_register(struct coreboot_driver *driver,
+ struct module *owner);
/* Unregister a driver that uses the data from a coreboot table. */
void coreboot_driver_unregister(struct coreboot_driver *driver);
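The coreboot change above moves ownership capture into a wrapper macro: the exported __coreboot_driver_register() takes the owning module explicitly, while the coreboot_driver_register() macro expands in each caller's own translation unit, where THIS_MODULE refers to that caller. A minimal userspace sketch of the same pattern (all names here are hypothetical, with __FILE__ standing in for THIS_MODULE):

#include <stdio.h>

struct owner { const char *name; };

struct driver {
	const char *name;
	const struct owner *owner;
};

/* the "real" registration function takes the owner explicitly */
static int __driver_register(struct driver *drv, const struct owner *owner)
{
	drv->owner = owner;
	printf("registered %s (owner %s)\n", drv->name, owner->name);
	return 0;
}

/*
 * The public entry point is a macro, so it expands at the call site and
 * picks up the caller's own identity -- the same trick the patch uses to
 * capture THIS_MODULE without chaining extra includes into the header.
 */
static const struct owner this_module = { .name = __FILE__ };
#define driver_register(drv) __driver_register(drv, &this_module)

int main(void)
{
	struct driver d = { .name = "demo", .owner = NULL };
	return driver_register(&d);
}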
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index fbeeaee4ac85..835a19a7a3a0 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -206,10 +206,12 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
if (ret | response->resp_status) {
dev_warn(priv->dev, "Verification of Upgrade Image failed!\n");
ret = ret ? ret : -EBADMSG;
+ goto free_message;
}
dev_info(priv->dev, "Verification of Upgrade Image passed!\n");
+free_message:
devm_kfree(priv->dev, message);
free_response:
devm_kfree(priv->dev, response);
@@ -265,7 +267,7 @@ static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv
AUTO_UPDATE_DIRECTORY_WIDTH);
memset(buffer + AUTO_UPDATE_BLANK_DIRECTORY, 0x0, AUTO_UPDATE_DIRECTORY_WIDTH);
- dev_info(priv->dev, "Writing the image address (%x) to the flash directory (%llx)\n",
+ dev_info(priv->dev, "Writing the image address (0x%x) to the flash directory (0x%llx)\n",
image_address, directory_address);
ret = mtd_write(priv->flash, 0x0, erase_size, &bytes_written, (u_char *)buffer);
@@ -313,7 +315,7 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const
erase.len = round_up(size, (size_t)priv->flash->erasesize);
erase.addr = image_address;
- dev_info(priv->dev, "Erasing the flash at address (%x)\n", image_address);
+ dev_info(priv->dev, "Erasing the flash at address (0x%x)\n", image_address);
ret = mtd_erase(priv->flash, &erase);
if (ret)
goto out;
@@ -323,7 +325,7 @@ static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const
* will do all of that itself - including verifying that the bitstream
* is valid.
*/
- dev_info(priv->dev, "Writing the image to the flash at address (%x)\n", image_address);
+ dev_info(priv->dev, "Writing the image to the flash at address (0x%x)\n", image_address);
ret = mtd_write(priv->flash, (loff_t)image_address, size, &bytes_written, data);
if (ret)
goto out;
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 32188f098ef3..bc550ad0dbe0 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -221,6 +221,19 @@ struct qsee_rsp_uefi_query_variable_info {
* alignment of 8 bytes (64 bits) for GUIDs. Our definition of efi_guid_t,
* however, has an alignment of 4 bytes (32 bits). So far, this seems to work
* fine here. See also the comment on the typedef of efi_guid_t.
+ *
+ * Note: It looks like uefisecapp is quite picky about how the memory passed
+ * to it is structured and aligned, in particular the request/response setup
+ * used for QSEE_CMD_UEFI_GET_VARIABLE. While qcom_qseecom_app_send(), in
+ * theory, accepts separate buffers/addresses for the request and response
+ * parts, in practice it seems to expect both to be part of one larger
+ * contiguous block. We initially allocated separate buffers for the request
+ * and response, but this caused the QSEE_CMD_UEFI_GET_VARIABLE command to
+ * either not write any response to the response buffer or outright crash
+ * the device. Therefore, we now allocate a single contiguous block of DMA
+ * memory for both and properly align the data using the macros below. In
+ * particular, request and response structs are aligned at 8 bytes (via
+ * __reqdata_offs()), following the driver that this has been
+ * reverse-engineered from.
*/
#define qcuefi_buf_align_fields(fields...) \
({ \
@@ -244,6 +257,12 @@ struct qsee_rsp_uefi_query_variable_info {
#define __array_offs(type, count, offset) \
__field_impl(sizeof(type) * (count), __alignof__(type), offset)
+#define __array_offs_aligned(type, count, align, offset) \
+ __field_impl(sizeof(type) * (count), align, offset)
+
+#define __reqdata_offs(size, offset) \
+ __array_offs_aligned(u8, size, 8, offset)
+
#define __array(type, count) __array_offs(type, count, NULL)
#define __field_offs(type, offset) __array_offs(type, 1, offset)
#define __field(type) __array_offs(type, 1, NULL)
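The qcuefi_buf_align_fields() machinery above computes where each field lands inside a single buffer so that, with __reqdata_offs(), both the request and response structs sit at 8-byte-aligned offsets of one allocation. A standalone sketch of the same offset arithmetic (simplified; the payload sizes are arbitrary and the helper name is hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* round off up to the next multiple of align (align must be a power of two) */
static size_t align_up(size_t off, size_t align)
{
	return (off + align - 1) & ~(align - 1);
}

int main(void)
{
	size_t req_size = 52, rsp_size = 40;	/* arbitrary payload sizes */
	size_t req_offs, rsp_offs, total;
	unsigned char *cmd_buf;

	req_offs = align_up(0, 8);
	rsp_offs = align_up(req_offs + req_size, 8);
	total = align_up(rsp_offs + rsp_size, 8);

	/* one contiguous allocation; req/rsp are just offsets into it */
	cmd_buf = calloc(1, total);
	if (!cmd_buf)
		return 1;

	printf("req at %zu, rsp at %zu, total %zu\n", req_offs, rsp_offs, total);
	free(cmd_buf);
	return 0;
}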
@@ -277,10 +296,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
unsigned long buffer_size = *data_size;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -304,17 +328,19 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
__array(u8, buffer_size)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_VARIABLE;
req_data->data_size = buffer_size;
@@ -332,7 +358,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -407,9 +435,7 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size);
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -422,10 +448,15 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
struct qsee_rsp_uefi_set_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t name_offs;
size_t guid_offs;
size_t data_offs;
size_t req_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -450,17 +481,19 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
__array_offs(u8, data_size, &data_offs)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_SET_VARIABLE;
req_data->attributes = attributes;
@@ -483,8 +516,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
if (data_size)
memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -507,9 +541,7 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -521,10 +553,15 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_get_next_variable *req_data;
struct qsee_rsp_uefi_get_next_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name_size || !name || !guid)
@@ -545,17 +582,19 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
__array(*name, *name_size / sizeof(*name))
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_NEXT_VARIABLE;
req_data->guid_offset = guid_offs;
@@ -572,7 +611,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
goto out_free;
}
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -645,9 +686,7 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -659,26 +698,34 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_query_variable_info *req_data;
struct qsee_rsp_uefi_query_variable_info *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
+ size_t req_offs;
+ size_t rsp_offs;
int status;
- req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(sizeof(*req_data), &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_QUERY_VARIABLE_INFO;
req_data->attributes = attr;
req_data->length = sizeof(*req_data);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, sizeof(*req_data), rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, sizeof(*req_data),
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -711,9 +758,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
*max_variable_size = rsp_data->max_variable_size;
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 520de9b5633a..68f4df7e6c3c 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -4,6 +4,8 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -114,6 +116,10 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
+#define QCOM_DLOAD_MASK GENMASK(5, 4)
+#define QCOM_DLOAD_NODUMP 0
+#define QCOM_DLOAD_FULLDUMP 1
+
static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_UNKNOWN] = "unknown",
[SMC_CONVENTION_ARM_32] = "smc arm 32",
@@ -163,9 +169,6 @@ static int qcom_scm_bw_enable(void)
if (!__scm->path)
return 0;
- if (IS_ERR(__scm->path))
- return -EINVAL;
-
mutex_lock(&__scm->scm_bw_lock);
if (!__scm->scm_vote_count) {
ret = icc_set_bw(__scm->path, 0, UINT_MAX);
@@ -183,7 +186,7 @@ err_bw:
static void qcom_scm_bw_disable(void)
{
- if (IS_ERR_OR_NULL(__scm->path))
+ if (!__scm->path)
return;
mutex_lock(&__scm->scm_bw_lock);
@@ -496,19 +499,32 @@ static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
+static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
+{
+ unsigned int old;
+ unsigned int new;
+ int ret;
+
+ ret = qcom_scm_io_readl(addr, &old);
+ if (ret)
+ return ret;
+
+ new = (old & ~mask) | (val & mask);
+
+ return qcom_scm_io_writel(addr, new);
+}
+
static void qcom_scm_set_download_mode(bool enable)
{
- bool avail;
+ u32 val = enable ? QCOM_DLOAD_FULLDUMP : QCOM_DLOAD_NODUMP;
int ret = 0;
- avail = __qcom_scm_is_call_available(__scm->dev,
- QCOM_SCM_SVC_BOOT,
- QCOM_SCM_BOOT_SET_DLOAD_MODE);
- if (avail) {
+ if (__scm->dload_mode_addr) {
+ ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
+ FIELD_PREP(QCOM_DLOAD_MASK, val));
+ } else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
- } else if (__scm->dload_mode_addr) {
- ret = qcom_scm_io_writel(__scm->dload_mode_addr,
- enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
} else {
dev_err(__scm->dev,
"No available mechanism for setting download mode\n");
@@ -557,10 +573,9 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
*/
mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
GFP_KERNEL);
- if (!mdata_buf) {
- dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+ if (!mdata_buf)
return -ENOMEM;
- }
+
memcpy(mdata_buf, metadata, size);
ret = qcom_scm_clk_enable();
@@ -569,13 +584,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
desc.args[1] = mdata_phys;
ret = qcom_scm_call(__scm->dev, &desc, &res);
-
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
out:
@@ -637,10 +653,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -672,10 +690,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -706,11 +726,12 @@ int qcom_scm_pas_shutdown(u32 peripheral)
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
-
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
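The recurring fix across the qcom_scm_pas_*() hunks above is the same: once qcom_scm_clk_enable() has succeeded, a failure in qcom_scm_bw_enable() must not return directly, or the clock is left enabled. A minimal sketch of the goto-based unwind pattern (stub functions, with the bandwidth step simulating failure):

#include <stdio.h>

static int clk_enable(void)	{ puts("clk on"); return 0; }
static void clk_disable(void)	{ puts("clk off"); }
static int bw_enable(void)	{ return -1; /* simulate failure */ }
static void bw_disable(void)	{ puts("bw off"); }

static int do_call(void)
{
	int ret = clk_enable();

	if (ret)
		return ret;	/* nothing acquired yet, a plain return is fine */

	ret = bw_enable();
	if (ret)
		goto disable_clk;	/* not "return ret": the clock must be released */

	/* ... the actual SCM call would go here ... */
	bw_disable();

disable_clk:
	clk_disable();
	return ret;
}

int main(void)
{
	return do_call() ? 1 : 0;
}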
@@ -1576,9 +1597,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
* qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @app_id: The ID of the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given ID and reads back
@@ -1589,33 +1610,13 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
*
* Return: Zero on success, nonzero on failure.
*/
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size)
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
struct qcom_scm_qseecom_resp res = {};
struct qcom_scm_desc desc = {};
- dma_addr_t req_phys;
- dma_addr_t rsp_phys;
int status;
- /* Map request buffer */
- req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
- status = dma_mapping_error(__scm->dev, req_phys);
- if (status) {
- dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
- return status;
- }
-
- /* Map response buffer */
- rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
- status = dma_mapping_error(__scm->dev, rsp_phys);
- if (status) {
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
- dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
- return status;
- }
-
- /* Set up SCM call data */
desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
@@ -1623,18 +1624,13 @@ int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL);
desc.args[0] = app_id;
- desc.args[1] = req_phys;
+ desc.args[1] = req;
desc.args[2] = req_size;
- desc.args[3] = rsp_phys;
+ desc.args[3] = rsp;
desc.args[4] = rsp_size;
- /* Perform call */
status = qcom_scm_qseecom_call(&desc, &res);
- /* Unmap buffers */
- dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
-
if (status)
return status;
@@ -1649,8 +1645,10 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
* We do not yet support re-entrant calls via the qseecom interface. To prevent
* any potential issues with this, only allow validated machines for now.
*/
-static const struct of_device_id qcom_scm_qseecom_allowlist[] = {
+static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ { .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-x13s", },
+ { .compatible = "qcom,sc8180x-primus" },
{ }
};
@@ -1738,7 +1736,7 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
*/
bool qcom_scm_is_available(void)
{
- return !!__scm;
+ return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
@@ -1769,7 +1767,7 @@ int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
return 0;
}
-static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
+static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
int ret;
@@ -1801,7 +1799,7 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
goto out;
}
- ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
+ ret = qcom_scm_waitq_wakeup(wq_ctx);
if (ret)
goto out;
} while (more_pending);
@@ -1819,10 +1817,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (!scm)
return -ENOMEM;
+ scm->dev = &pdev->dev;
ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
if (ret < 0)
return ret;
+ init_completion(&scm->waitq_comp);
mutex_init(&scm->scm_bw_lock);
scm->path = devm_of_icc_get(&pdev->dev, NULL);
@@ -1854,10 +1854,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
- __scm = scm;
- __scm->dev = &pdev->dev;
-
- init_completion(&__scm->waitq_comp);
+ /* Let all above stores be available after this */
+ smp_store_release(&__scm, scm);
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
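The probe change above initialises every field of scm first and only then publishes the pointer with smp_store_release(), pairing with the READ_ONCE() in qcom_scm_is_available() so another CPU can never observe __scm non-NULL while its fields are still uninitialised. A userspace analog using C11 atomics (the consumer side here uses an acquire load for simplicity, which is stronger than the kernel's READ_ONCE()):

#include <stdatomic.h>
#include <stddef.h>

struct scm { int ready; };

static struct scm instance;
static _Atomic(struct scm *) scm_ptr;

/* publisher: initialise all fields first, then release-store the pointer */
static void publish(void)
{
	instance.ready = 1;
	atomic_store_explicit(&scm_ptr, &instance, memory_order_release);
}

/* consumer: the acquire load guarantees the initialised fields are visible */
static struct scm *lookup(void)
{
	return atomic_load_explicit(&scm_ptr, memory_order_acquire);
}

int main(void)
{
	struct scm *scm;

	publish();
	scm = lookup();
	return (scm && scm->ready) ? 0 : 1;
}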
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 322aada20f74..ac34876a97f8 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -97,8 +98,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
if (size & 3)
return -EINVAL;
- buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
- GFP_ATOMIC);
+ buf = dma_alloc_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size),
+ &bus_addr, GFP_ATOMIC);
if (!buf)
return -ENOMEM;
@@ -126,7 +127,7 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
ret = -EINVAL;
}
- dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
+ dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr);
return ret;
}
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index db818f9dcb8e..d670635914ec 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -69,6 +69,7 @@ s32 arm_smccc_get_soc_id_revision(void)
{
return smccc_soc_id_revision;
}
+EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision);
static int __init smccc_devices_init(void)
{
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 8b9a2556de16..160968301b1f 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -87,7 +87,6 @@ struct ti_sci_desc {
* struct ti_sci_info - Structure representing a TI SCI instance
* @dev: Device pointer
* @desc: SoC description for this instance
- * @nb: Reboot Notifier block
* @d: Debugfs file entry
* @debug_region: Memory region where the debug message are available
* @debug_region_size: Debug region size
@@ -103,7 +102,6 @@ struct ti_sci_desc {
*/
struct ti_sci_info {
struct device *dev;
- struct notifier_block nb;
const struct ti_sci_desc *desc;
struct dentry *d;
void __iomem *debug_region;
@@ -122,7 +120,6 @@ struct ti_sci_info {
#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
-#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
#ifdef CONFIG_DEBUG_FS
@@ -3254,10 +3251,9 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
-static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
- void *cmd)
+static int tisci_reboot_handler(struct sys_off_data *data)
{
- struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
+ struct ti_sci_info *info = data->cb_data;
const struct ti_sci_handle *handle = &info->handle;
ti_sci_cmd_core_reboot(handle);
@@ -3303,7 +3299,6 @@ static int ti_sci_probe(struct platform_device *pdev)
struct mbox_client *cl;
int ret = -EINVAL;
int i;
- int reboot = 0;
u32 h_id;
desc = device_get_match_data(dev);
@@ -3327,8 +3322,6 @@ static int ti_sci_probe(struct platform_device *pdev)
}
}
- reboot = of_property_read_bool(dev->of_node,
- "ti,system-reboot-controller");
INIT_LIST_HEAD(&info->node);
minfo = &info->minfo;
@@ -3399,15 +3392,10 @@ static int ti_sci_probe(struct platform_device *pdev)
ti_sci_setup_ops(info);
- if (reboot) {
- info->nb.notifier_call = tisci_reboot_handler;
- info->nb.priority = 128;
-
- ret = register_restart_handler(&info->nb);
- if (ret) {
- dev_err(dev, "reboot registration fail(%d)\n", ret);
- goto out;
- }
+ ret = devm_register_restart_handler(dev, tisci_reboot_handler, info);
+ if (ret) {
+ dev_err(dev, "reboot registration fail(%d)\n", ret);
+ goto out;
}
dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 98b8fd16183e..80cac3a5f976 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -78,6 +78,7 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
#define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001
#define PCIE_DEVICE_ID_INTEL_DFL 0xbcce
/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
+#define PCIE_SUBDEVICE_ID_INTEL_D5005 0x138d
#define PCIE_SUBDEVICE_ID_INTEL_N6000 0x1770
#define PCIE_SUBDEVICE_ID_INTEL_N6001 0x1771
#define PCIE_SUBDEVICE_ID_INTEL_C6100 0x17d4
@@ -102,6 +103,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_D5005),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
{PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 1ee62cd58582..25db014494a4 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
case 0x5e:
return GPIOPANELCTL;
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
}
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 5ef8af824980..c097e310c9e8 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -529,6 +529,7 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = {
{ .compatible = "nxp,lpc3220-gpio", },
{ },
};
+MODULE_DEVICE_TABLE(of, lpc32xx_gpio_of_match);
static struct platform_driver lpc32xx_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index b75e0b12087a..4b29abafecf6 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -195,7 +195,8 @@ static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
static void tng_irq_ack(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
void __iomem *gisr;
u8 shift;
@@ -227,7 +228,8 @@ static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
static void tng_irq_mask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
tng_irq_unmask_mask(priv, gpio, false);
@@ -236,7 +238,8 @@ static void tng_irq_mask(struct irq_data *d)
static void tng_irq_unmask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
gpiochip_enable_irq(&priv->chip, gpio);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index d87dd06db40d..9130c691a2dd 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -36,12 +36,6 @@
#define TEGRA186_GPIO_SCR_SEC_REN BIT(27)
#define TEGRA186_GPIO_SCR_SEC_G1W BIT(9)
#define TEGRA186_GPIO_SCR_SEC_G1R BIT(1)
-#define TEGRA186_GPIO_FULL_ACCESS (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN | \
- TEGRA186_GPIO_SCR_SEC_G1R | \
- TEGRA186_GPIO_SCR_SEC_G1W)
-#define TEGRA186_GPIO_SCR_SEC_ENABLE (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN)
/* control registers */
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
@@ -177,10 +171,18 @@ static inline bool tegra186_gpio_is_accessible(struct tegra_gpio *gpio, unsigned
value = __raw_readl(secure + TEGRA186_GPIO_SCR);
- if ((value & TEGRA186_GPIO_SCR_SEC_ENABLE) == 0)
- return true;
+ /*
+ * When SCR_SEC_[R|W]EN is unset, we have full read/write access to all the
+ * registers for the given GPIO pin.
+ * When SCR_SEC_[R|W]EN is set, we must additionally check the accompanying
+ * SCR_SEC_G1[R|W] bit to determine read/write access to all the registers
+ * for the given GPIO pin.
+ */
- if ((value & TEGRA186_GPIO_FULL_ACCESS) == TEGRA186_GPIO_FULL_ACCESS)
+ if (((value & TEGRA186_GPIO_SCR_SEC_REN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_REN) && (value & TEGRA186_GPIO_SCR_SEC_G1R))) &&
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) && (value & TEGRA186_GPIO_SCR_SEC_G1W))))
return true;
return false;
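The rewritten tegra186 condition above reduces to two independent predicates: the pin is readable if SCR_SEC_REN is clear or SCR_SEC_G1R is set, and writable if SCR_SEC_WEN is clear or SCR_SEC_G1W is set; access requires both. A standalone sketch of the same check (the WEN bit position is assumed here, since its definition is outside the visible hunk):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCR_SEC_WEN	(1u << 28)	/* assumed bit position, not shown in the hunk */
#define SCR_SEC_REN	(1u << 27)
#define SCR_SEC_G1W	(1u << 9)
#define SCR_SEC_G1R	(1u << 1)

static bool scr_accessible(uint32_t scr)
{
	bool readable = !(scr & SCR_SEC_REN) || (scr & SCR_SEC_G1R);
	bool writable = !(scr & SCR_SEC_WEN) || (scr & SCR_SEC_G1W);

	return readable && writable;
}

int main(void)
{
	printf("%d\n", scr_accessible(0));				/* 1: nothing locked */
	printf("%d\n", scr_accessible(SCR_SEC_REN));			/* 0: reads locked */
	printf("%d\n", scr_accessible(SCR_SEC_REN | SCR_SEC_G1R));	/* 1: group 1 may read */
	return 0;
}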
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index c18b6b47384f..94ca9d03c094 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type)
unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;
if (gpio >= WCOVE_GPIO_NUM)
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
return reg + gpio;
}
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index d09c7d728365..9dad67ea2597 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1193,6 +1193,8 @@ static int edge_detector_update(struct line *line,
struct gpio_v2_line_config *lc,
unsigned int line_idx, u64 edflags)
{
+ u64 eflags;
+ int ret;
u64 active_edflags = READ_ONCE(line->edflags);
unsigned int debounce_period_us =
gpio_v2_line_config_debounce_period(lc, line_idx);
@@ -1204,6 +1206,18 @@ static int edge_detector_update(struct line *line,
/* sw debounced and still will be...*/
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
line_set_debounce_period(line, debounce_period_us);
+ /*
+ * ensure event fifo is initialised if edge detection
+ * is now enabled.
+ */
+ eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
+ if (eflags && !kfifo_initialized(&line->req->events)) {
+ ret = kfifo_alloc(&line->req->events,
+ line->req->event_buffer_size,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
return 0;
}
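The cdev fix above lazily allocates the event fifo when edge detection is first enabled on an already-debounced line, since the fifo may not have been set up on the original request. A minimal userspace sketch of the allocate-on-first-use pattern (calloc standing in for kfifo_alloc):

#include <stdio.h>
#include <stdlib.h>

struct event_queue {
	unsigned int *buf;
	size_t cap;
};

/* allocate the backing buffer only on first use; later calls are no-ops */
static int queue_ensure(struct event_queue *q, size_t cap)
{
	if (q->buf)		/* already initialised, nothing to do */
		return 0;

	q->buf = calloc(cap, sizeof(*q->buf));
	if (!q->buf)
		return -1;
	q->cap = cap;
	return 0;
}

int main(void)
{
	struct event_queue q = { 0 };

	if (queue_ensure(&q, 64))	/* first call allocates */
		return 1;
	if (queue_ensure(&q, 64))	/* second call is a no-op */
		return 1;

	printf("capacity %zu\n", q.cap);
	free(q.buf);
	return 0;
}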
@@ -2351,7 +2365,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
dflags = READ_ONCE(desc->flags);
- scoped_guard(srcu, &desc->srcu) {
+ scoped_guard(srcu, &desc->gdev->desc_srcu) {
label = gpiod_get_label(desc);
if (label && test_bit(FLAG_REQUESTED, &dflags))
strscpy(info->consumer, label,
@@ -2799,11 +2813,11 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
- bitmap_free(cdev->watched_lines);
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
blocking_notifier_chain_unregister(&gdev->line_state_notifier,
&cdev->lineinfo_changed_nb);
+ bitmap_free(cdev->watched_lines);
gpio_device_put(gdev);
kfree(cdev);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 94903fc1c145..fa50db0c3605 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -101,6 +101,7 @@ static bool gpiolib_initialized;
const char *gpiod_get_label(struct gpio_desc *desc)
{
+ struct gpio_desc_label *label;
unsigned long flags;
flags = READ_ONCE(desc->flags);
@@ -108,23 +109,36 @@ const char *gpiod_get_label(struct gpio_desc *desc)
!test_bit(FLAG_REQUESTED, &flags))
return "interrupt";
- return test_bit(FLAG_REQUESTED, &flags) ?
- srcu_dereference(desc->label, &desc->srcu) : NULL;
+ if (!test_bit(FLAG_REQUESTED, &flags))
+ return NULL;
+
+ label = srcu_dereference_check(desc->label, &desc->gdev->desc_srcu,
+ srcu_read_lock_held(&desc->gdev->desc_srcu));
+
+ return label->str;
+}
+
+static void desc_free_label(struct rcu_head *rh)
+{
+ kfree(container_of(rh, struct gpio_desc_label, rh));
}
static int desc_set_label(struct gpio_desc *desc, const char *label)
{
- const char *new = NULL, *old;
+ struct gpio_desc_label *new = NULL, *old;
if (label) {
- new = kstrdup_const(label, GFP_KERNEL);
+ new = kzalloc(struct_size(new, str, strlen(label) + 1),
+ GFP_KERNEL);
if (!new)
return -ENOMEM;
+
+ strcpy(new->str, label);
}
old = rcu_replace_pointer(desc->label, new, 1);
- synchronize_srcu(&desc->srcu);
- kfree_const(old);
+ if (old)
+ call_srcu(&desc->gdev->desc_srcu, &old->rh, desc_free_label);
return 0;
}
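desc_set_label() above switches from kstrdup_const() to a single allocation that embeds the string after a small header: struct gpio_desc_label carries the rcu_head that call_srcu() needs, so the old label can be freed once all SRCU readers are done. A userspace sketch of the embedded-header, flexible-array layout (a length field stands in for the kernel's struct rcu_head):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* analog of struct gpio_desc_label: header plus inline string, one allocation */
struct label {
	size_t len;	/* stand-in for the kernel's struct rcu_head */
	char str[];
};

static struct label *label_new(const char *s)
{
	size_t n = strlen(s) + 1;
	struct label *l = malloc(sizeof(*l) + n);	/* cf. struct_size() */

	if (!l)
		return NULL;
	l->len = n;
	memcpy(l->str, s, n);
	return l;
}

int main(void)
{
	struct label *l = label_new("gpio-consumer");

	if (!l)
		return 1;
	printf("%s (%zu bytes)\n", l->str, l->len);
	free(l);	/* the kernel defers this via call_srcu() instead */
	return 0;
}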
@@ -695,10 +709,10 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
static void gpiodev_release(struct device *dev)
{
struct gpio_device *gdev = to_gpio_device(dev);
- unsigned int i;
- for (i = 0; i < gdev->ngpio; i++)
- cleanup_srcu_struct(&gdev->descs[i].srcu);
+ /* Call pending kfree()s for descriptor labels. */
+ synchronize_srcu(&gdev->desc_srcu);
+ cleanup_srcu_struct(&gdev->desc_srcu);
ida_free(&gpio_ida, gdev->id);
kfree_const(gdev->label);
@@ -975,6 +989,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (ret)
goto err_remove_from_list;
+ ret = init_srcu_struct(&gdev->desc_srcu);
+ if (ret)
+ goto err_cleanup_gdev_srcu;
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@@ -982,23 +1000,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (gc->names) {
ret = gpiochip_set_desc_names(gc);
if (ret)
- goto err_cleanup_gdev_srcu;
+ goto err_cleanup_desc_srcu;
}
ret = gpiochip_set_names(gc);
if (ret)
- goto err_cleanup_gdev_srcu;
+ goto err_cleanup_desc_srcu;
ret = gpiochip_init_valid_mask(gc);
if (ret)
- goto err_cleanup_gdev_srcu;
+ goto err_cleanup_desc_srcu;
for (desc_index = 0; desc_index < gc->ngpio; desc_index++) {
struct gpio_desc *desc = &gdev->descs[desc_index];
- ret = init_srcu_struct(&desc->srcu);
- if (ret)
- goto err_cleanup_desc_srcu;
-
if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->get_direction(gc, desc_index));
@@ -1010,7 +1024,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = of_gpiochip_add(gc);
if (ret)
- goto err_cleanup_desc_srcu;
+ goto err_free_valid_mask;
ret = gpiochip_add_pin_ranges(gc);
if (ret)
@@ -1057,10 +1071,10 @@ err_free_hogs:
gpiochip_remove_pin_ranges(gc);
err_remove_of_chip:
of_gpiochip_remove(gc);
-err_cleanup_desc_srcu:
- while (desc_index--)
- cleanup_srcu_struct(&gdev->descs[desc_index].srcu);
+err_free_valid_mask:
gpiochip_free_valid_mask(gc);
+err_cleanup_desc_srcu:
+ cleanup_srcu_struct(&gdev->desc_srcu);
err_cleanup_gdev_srcu:
cleanup_srcu_struct(&gdev->srcu);
err_remove_from_list:
@@ -2390,7 +2404,7 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
if (!test_bit(FLAG_REQUESTED, &desc->flags))
return NULL;
- guard(srcu)(&desc->srcu);
+ guard(srcu)(&desc->gdev->desc_srcu);
label = kstrdup(gpiod_get_label(desc), GFP_KERNEL);
if (!label)
@@ -4781,7 +4795,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
}
for_each_gpio_desc(gc, desc) {
- guard(srcu)(&desc->srcu);
+ guard(srcu)(&desc->gdev->desc_srcu);
if (test_bit(FLAG_REQUESTED, &desc->flags)) {
gpiod_get_direction(desc);
is_out = test_bit(FLAG_IS_OUT, &desc->flags);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index f67d5991ab1c..8e0e211ebf08 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -31,6 +31,7 @@
* @chip: pointer to the corresponding gpiochip, holding static
* data for this device
* @descs: array of ngpio descriptors.
+ * @desc_srcu: ensures consistent state of GPIO descriptors exposed to users
* @ngpio: the number of GPIO lines on this GPIO device, equal to the size
* of the @descs array.
* @can_sleep: indicate whether the GPIO chip driver's callbacks can sleep
@@ -61,6 +62,7 @@ struct gpio_device {
struct module *owner;
struct gpio_chip __rcu *chip;
struct gpio_desc *descs;
+ struct srcu_struct desc_srcu;
int base;
u16 ngpio;
bool can_sleep;
@@ -137,6 +139,11 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
+struct gpio_desc_label {
+ struct rcu_head rh;
+ char str[];
+};
+
/**
* struct gpio_desc - Opaque descriptor for a GPIO
*
@@ -145,7 +152,6 @@ void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
* @label: Name of the consumer
* @name: Line name
* @hog: Pointer to the device node that hogs this line (if any)
- * @srcu: SRCU struct protecting the label pointer.
*
* These are obtained using gpiod_get() and are preferable to the old
* integer-based handles.
@@ -177,13 +183,12 @@ struct gpio_desc {
#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
/* Connection label */
- const char __rcu *label;
+ struct gpio_desc_label __rcu *label;
/* Name of the GPIO */
const char *name;
#ifdef CONFIG_OF_DYNAMIC
struct device_node *hog;
#endif
- struct srcu_struct srcu;
};
#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
@@ -251,7 +256,7 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
#define gpiod_err(desc, fmt, ...) \
do { \
- scoped_guard(srcu, &desc->srcu) { \
+ scoped_guard(srcu, &desc->gdev->desc_srcu) { \
pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
} \
@@ -259,7 +264,7 @@ do { \
#define gpiod_warn(desc, fmt, ...) \
do { \
- scoped_guard(srcu, &desc->srcu) { \
+ scoped_guard(srcu, &desc->gdev->desc_srcu) { \
pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
} \
@@ -267,7 +272,7 @@ do { \
#define gpiod_dbg(desc, fmt, ...) \
do { \
- scoped_guard(srcu, &desc->srcu) { \
+ scoped_guard(srcu, &desc->gdev->desc_srcu) { \
pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
} \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9c62552bec34..b3b84647207e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -210,6 +210,7 @@ extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
+extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df58a6a1a67e..e4d4e55c08ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -220,7 +220,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
- vram_size - reserved_for_pt)) {
+ vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) {
ret = -ENOMEM;
goto release;
}
@@ -1854,6 +1854,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
+ amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
@@ -2900,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
amdgpu_sync_create(&sync_obj);
- /* Validate BOs and map them to GPUVM (update VM page tables). */
+ /* Validate BOs managed by KFD */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
- struct kfd_mem_attachment *attachment;
struct dma_resv_iter cursor;
struct dma_fence *fence;
@@ -2931,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
goto validate_map_fail;
}
}
+ }
+
+ if (failed_size)
+ pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+
+ /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+ * validations above would invalidate DMABuf imports again.
+ */
+ ret = process_validate_vms(process_info, &exec.ticket);
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
+ goto validate_map_fail;
+ }
+
+ /* Update mappings managed by KFD. */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list) {
+ struct kfd_mem_attachment *attachment;
+
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
continue;
@@ -2947,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
}
- if (failed_size)
- pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
-
- /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
- * validations above would invalidate DMABuf imports again.
- */
- ret = process_validate_vms(process_info, &exec.ticket);
- if (ret) {
- pr_debug("Validating VMs failed, ret: %d\n", ret);
- goto validate_map_fail;
- }
-
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0a4b09709cfb..ec888fc6ead8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -819,7 +819,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index aa16d51dd842..7753a2e64d41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4135,18 +4135,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ip_blocks[i].status.hw = true;
}
}
+ } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(adev)) {
+ r = psp_gpu_reset(adev);
} else {
- tmp = amdgpu_reset_method;
- /* It should do a default reset when loading or reloading the driver,
- * regardless of the module parameter reset_method.
- */
- amdgpu_reset_method = AMD_RESET_METHOD_NONE;
- r = amdgpu_asic_reset(adev);
- amdgpu_reset_method = tmp;
- if (r) {
- dev_err(adev->dev, "asic reset on init failed\n");
- goto failed;
- }
+ tmp = amdgpu_reset_method;
+ /* It should do a default reset when loading or reloading the driver,
+ * regardless of the module parameter reset_method.
+ */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+ r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
+ }
+
+ if (r) {
+ dev_err(adev->dev, "asic reset on init failed\n");
+ goto failed;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index fdd36fb027ab..ac5bf01fe8d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1896,6 +1896,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 80b9642f2bc4..e4277298cf1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
+int amdgpu_mes_log_enable = 0;
int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
@@ -668,6 +669,15 @@ MODULE_PARM_DESC(mes,
module_param_named(mes, amdgpu_mes, int, 0444);
/**
+ * DOC: mes_log_enable (int)
+ * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(mes_log_enable,
+ "Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
+module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
+
+/**
* DOC: mes_kiq (int)
* Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
* (0 = disabled (default), 1 = enabled)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4b3000c21ef2..e4742b65032d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
- DRM_INFO("Skip scheduling IBs!\n");
+ dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
+ ring->name);
} else {
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev,
+ "Error scheduling IBs (%d) in ring(%s)", r,
+ ring->name);
}
job->job_run_counter++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index a98e03e0a51f..1569bef030ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -102,7 +102,10 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ if (!amdgpu_mes_log_enable)
+ return 0;
+
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
@@ -1129,6 +1132,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
return;
amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
amdgpu_ring_fini(ring);
kfree(ring);
}
@@ -1549,12 +1553,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
- mem, PAGE_SIZE, false);
+ mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
#endif
@@ -1565,7 +1568,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
- if (adev->enable_mes)
+ if (adev->enable_mes && amdgpu_mes_log_enable)
debugfs_create_file("amdgpu_mes_event_log", 0444, root,
adev, &amdgpu_debugfs_mes_event_log_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7d4f93fea937..4c8fc3117ef8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level {
#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximum log buffer size for MES */
struct amdgpu_mes_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 010b0cb7693c..f6d503432a9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -605,6 +605,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
else
amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
+ bo->tbo.priority = 2;
+ else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
bo->tbo.priority = 1;
if (!bp->destroy)
@@ -617,8 +619,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.resource->mem_type == TTM_PL_VRAM &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -1242,14 +1243,18 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
* amdgpu_bo_move_notify - notification about a memory move
* @bo: pointer to a buffer object
* @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new resource for backing the BO
*
* Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
* bookkeeping.
* TTM driver callback which is called when ttm moves a buffer.
*/
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_resource *new_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_resource *old_mem = bo->resource;
struct amdgpu_bo *abo;
if (!amdgpu_bo_is_amdgpu_bo(bo))
@@ -1261,34 +1266,36 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
amdgpu_bo_kunmap(abo);
if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
- bo->resource->mem_type != TTM_PL_SYSTEM)
+ old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
- /* remember the eviction */
- if (evict)
- atomic64_inc(&adev->num_evictions);
+ /* move_notify is called before move happens */
+ trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
+ old_mem ? old_mem->mem_type : -1);
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
unsigned int domain;
bool shared;
/* Abort if the BO doesn't currently have a backing store */
- if (!bo->tbo.resource)
+ if (!res)
return;
obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);
- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+ domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;
@@ -1389,10 +1396,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->resource->mem_type != TTM_PL_VRAM)
- return 0;
-
- if (amdgpu_bo_in_cpu_visible_vram(abo))
+ if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1415,7 +1419,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
- !amdgpu_bo_in_cpu_visible_vram(abo))
+ !amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1579,6 +1583,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
@@ -1587,10 +1592,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
+
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index be679c42b0b8..bc42ccbde659 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -251,28 +251,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
}
/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_res_cursor cursor;
-
- if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
- return false;
-
- amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
- while (cursor.remaining) {
- if (cursor.start < adev->gmc.visible_vram_size)
- return true;
-
- amdgpu_res_next(&cursor, cursor.size);
- }
-
- return false;
-}
-
-/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
@@ -350,7 +328,9 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags);
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index fc418e670fda..29c197c00018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- amdgpu_bo_in_cpu_visible_vram(abo)) {
+ amdgpu_res_cpu_visible(adev, bo->resource)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@@ -403,40 +403,55 @@ error:
return r;
}
-/*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
*
- * Called by amdgpu_bo_move()
+ * Returns: true if the full resource is CPU visible, false otherwise.
*/
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
- struct ttm_resource *mem)
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res)
{
- u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
- u64 end;
- if (mem->mem_type == TTM_PL_SYSTEM ||
- mem->mem_type == TTM_PL_TT)
+ if (!res)
+ return false;
+
+ if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
return true;
- if (mem->mem_type != TTM_PL_VRAM)
+
+ if (res->mem_type != TTM_PL_VRAM)
return false;
- amdgpu_res_first(mem, 0, mem_size, &cursor);
- end = cursor.start + cursor.size;
+ amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
+ if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
+ return false;
amdgpu_res_next(&cursor, cursor.size);
+ }
- if (!cursor.remaining)
- break;
+ return true;
+}
- /* ttm_resource_ioremap only supports contiguous memory */
- if (end != cursor.start)
- return false;
+/*
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
+ *
+ * Called by amdgpu_bo_move()
+ */
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+ struct ttm_resource *mem)
+{
+ if (!amdgpu_res_cpu_visible(adev, mem))
+ return false;
- end = cursor.start + cursor.size;
- }
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (mem->mem_type == TTM_PL_VRAM &&
+ !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+ return false;
- return end <= adev->gmc.visible_vram_size;
+ return true;
}
/*
@@ -466,14 +481,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
bo->ttm == NULL)) {
+ amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
- goto out;
+ return 0;
}
if (old_mem->mem_type == TTM_PL_SYSTEM &&
(new_mem->mem_type == TTM_PL_TT ||
new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
+ amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
- goto out;
+ return 0;
}
if ((old_mem->mem_type == TTM_PL_TT ||
old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
@@ -483,9 +500,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+ amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
- goto out;
+ return 0;
}
if (old_mem->mem_type == AMDGPU_PL_GDS ||
@@ -497,8 +515,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
new_mem->mem_type == AMDGPU_PL_OA ||
new_mem->mem_type == AMDGPU_PL_DOORBELL) {
/* Nothing to save here */
+ amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
- goto out;
+ return 0;
}
if (bo->type == ttm_bo_type_device &&
@@ -510,27 +529,28 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
}
- if (adev->mman.buffer_funcs_enabled) {
- if (((old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_VRAM) ||
- (old_mem->mem_type == TTM_PL_VRAM &&
- new_mem->mem_type == TTM_PL_SYSTEM))) {
- hop->fpfn = 0;
- hop->lpfn = 0;
- hop->mem_type = TTM_PL_TT;
- hop->flags = TTM_PL_FLAG_TEMPORARY;
- return -EMULTIHOP;
- }
+ if (adev->mman.buffer_funcs_enabled &&
+ ((old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_VRAM) ||
+ (old_mem->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM))) {
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ hop->mem_type = TTM_PL_TT;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+ amdgpu_bo_move_notify(bo, evict, new_mem);
+ if (adev->mman.buffer_funcs_enabled)
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
- } else {
+ else
r = -ENODEV;
- }
if (r) {
/* Check that all memory is CPU accessible */
- if (!amdgpu_mem_visible(adev, old_mem) ||
- !amdgpu_mem_visible(adev, new_mem)) {
+ if (!amdgpu_res_copyable(adev, old_mem) ||
+ !amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}
@@ -540,11 +560,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
- trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
-out:
- /* update statistics */
+ /* update statistics after the move */
+ if (evict)
+ atomic64_inc(&adev->num_evictions);
atomic64_add(bo->base.size, &adev->num_bytes_moved);
- amdgpu_bo_move_notify(bo, evict);
return 0;
}
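A direct SYSTEM<->VRAM move cannot be blitted in one step, so the consolidated branch above asks TTM to bounce through a temporary GTT placement by filling *hop and returning -EMULTIHOP. A rough standalone sketch of just that decision, with simplified placement enums in place of the TTM types:

#include <stdio.h>

enum pl { PL_SYSTEM, PL_TT, PL_VRAM };

/* Returns 1 when the move needs a temporary GTT hop (the -EMULTIHOP case). */
static int needs_tt_hop(enum pl from, enum pl to, int dma_enabled)
{
	return dma_enabled &&
	       ((from == PL_SYSTEM && to == PL_VRAM) ||
		(from == PL_VRAM && to == PL_SYSTEM));
}

int main(void)
{
	printf("%d\n", needs_tt_hop(PL_SYSTEM, PL_VRAM, 1)); /* 1: hop via TT */
	printf("%d\n", needs_tt_hop(PL_TT, PL_VRAM, 1));     /* 0: direct */
	return 0;
}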
@@ -557,7 +576,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@@ -568,9 +586,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- /* check if it's visible */
- if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
- return -EINVAL;
if (adev->mman.aper_base_kaddr &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)
@@ -1548,7 +1563,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
- amdgpu_bo_move_notify(bo, false);
+ amdgpu_bo_move_notify(bo, false, NULL);
}
static struct ttm_device_funcs amdgpu_bo_driver = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 65ec82141a8e..32cf6b6f6efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res);
+
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index 0df97c3e3a70..f7c73533e336 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -774,6 +774,9 @@ static int umsch_mm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
+ return 0;
+
return umsch_mm_test(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4299ce386322..94089069c9ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1613,6 +1613,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo,
+ uint64_t saddr,
+ uint64_t offset,
+ uint64_t size)
+{
+ uint64_t tmp, lpfn;
+
+ if (saddr & AMDGPU_GPU_PAGE_MASK
+ || offset & AMDGPU_GPU_PAGE_MASK
+ || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+
+ if (check_add_overflow(saddr, size, &tmp)
+ || check_add_overflow(offset, size, &tmp)
+ || size == 0 /* which also leads to end < begin */)
+ return -EINVAL;
+
+ /* make sure the object fits at this offset */
+ if (bo && offset + size > amdgpu_bo_size(bo))
+ return -EINVAL;
+
+ /* Ensure the last pfn does not exceed max_pfn */
+ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+ if (lpfn >= adev->vm_manager.max_pfn)
+ return -EINVAL;
+
+ return 0;
+}
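The new helper centralizes the checks the mapping paths used to duplicate: GPU-page alignment, arithmetic wrap-around, zero size, and the last pfn staying below max_pfn. A runnable userspace approximation, assuming a 4 KiB GPU page and using the compiler builtin that the kernel's check_add_overflow() wraps (the bo-size check is omitted here):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE 4096ull
#define GPU_PAGE_MASK (GPU_PAGE_SIZE - 1)

static int verify(uint64_t saddr, uint64_t offset, uint64_t size,
		  uint64_t max_pfn)
{
	uint64_t tmp;

	if ((saddr | offset | size) & GPU_PAGE_MASK)
		return -1;			/* unaligned */
	if (__builtin_add_overflow(saddr, size, &tmp) ||
	    __builtin_add_overflow(offset, size, &tmp) || size == 0)
		return -1;			/* wrap-around or empty */
	if (((saddr + size - 1) / GPU_PAGE_SIZE) >= max_pfn)
		return -1;			/* past the last valid pfn */
	return 0;
}

int main(void)
{
	/* The old "saddr + size <= saddr" test missed size == 0; this rejects it. */
	printf("%d\n", verify(0x1000, 0, 0, 1 << 20));	    /* -1 */
	printf("%d\n", verify(UINT64_MAX & ~GPU_PAGE_MASK,
			      0, GPU_PAGE_SIZE, 1 << 20));  /* -1: wraps */
	printf("%d\n", verify(0x1000, 0, 0x2000, 1 << 20)); /* 0: ok */
	return 0;
}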
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -1639,21 +1670,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
+ int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@@ -1706,17 +1730,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1730,7 +1746,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@@ -1817,10 +1833,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
+ int r;
+
+ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+ if (r)
+ return r;
- eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 6695481f870f..c23d97d34b7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -205,7 +205,7 @@ disable_dpm:
dpm_ctl &= 0xfffffffe; /* Disable DPM */
WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
- return 0;
+ return -EINVAL;
}
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index d6f808acfb17..fbb43ae7624f 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
+static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
+{
+ return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
+}
+
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
uint32_t inst_idx, struct amdgpu_ring *ring)
{
@@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
case AMDGPU_RING_TYPE_VCN_ENC:
case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN;
- if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ if (aqua_vanjaram_xcp_vcn_shared(adev))
inst_mask = 1 << (inst_idx * 2);
break;
default:
@@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update(
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
- /* VCN is shared by two partitions under CPX MODE */
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
- adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ aqua_vanjaram_xcp_vcn_shared(adev))
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f90905ef32c7..701146d649c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -9186,7 +9186,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -9276,7 +9276,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
7 + /* gfx_v10_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 1770e496c1b7..f00e05aba46a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
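Harvested render backends have to be intersected with the globally active set; OR-ing the two masks, as before, would report harvested RBs as enabled and overcount num_rbs. A two-line demonstration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned per_sa_active = 0xf;	/* what the per-SA loop computed */
	unsigned global_active = 0x7;	/* one RB harvested */

	/* AND keeps only RBs present in both views; OR would give 0xf. */
	unsigned rbs = per_sa_active & global_active;
	printf("mask 0x%x, num_rbs %d\n", rbs, __builtin_popcount(rbs));
	return 0;
}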
@@ -5465,6 +5465,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* Make sure that we can't skip the SET_Q_MODE packets when the VM
* changed in any way.
*/
+ ring->set_q_mode_offs = 0;
ring->set_q_mode_ptr = NULL;
}
@@ -6191,7 +6192,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* VM_FLUSH */
+ 4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
5 + /* COND_EXEC */
@@ -6277,7 +6278,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
7 + /* gfx_v11_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v11_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
.emit_ib = gfx_v11_0_ring_emit_ib_compute,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6f97a6d0e6d0..99dbd2341120 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -6981,7 +6981,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
@@ -7019,7 +7018,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
- 2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 072c478665ad..63f281a9984d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -411,8 +411,11 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
- mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ if (amdgpu_mes_log_enable) {
+ mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
+ mes->event_log_gpu_addr;
+ }
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 34237a1b1f2e..e708468ac54d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -368,7 +368,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
+ << (ring->me % adev->sdma.num_inst_per_aid);
sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
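On parts with several AIDs, ring->me counts SDMA instances globally, but the HDP flush mask only has one bit per instance within an AID, so the shift must use the per-AID index. A quick demonstration of the modulo mapping; four instances per AID is assumed here for illustration:

#include <stdio.h>

int main(void)
{
	unsigned ref_and_mask_sdma0 = 0x1;	/* bit for SDMA0 of an AID */
	unsigned num_inst_per_aid = 4;

	for (unsigned me = 0; me < 8; me++) {
		/* global instance 5 maps to bit 1, not off the bitfield */
		unsigned mask = ref_and_mask_sdma0 << (me % num_inst_per_aid);
		printf("ring %u -> mask 0x%x\n", me, mask);
	}
	return 0;
}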
@@ -1602,19 +1603,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
- DRAM_ECC_INT_ENABLE, 0);
- WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
- break;
- /* sdma ecc interrupt is enabled by default
- * driver doesn't need to do anything to
- * enable the interrupt */
- case AMDGPU_IRQ_STATE_ENABLE:
- default:
- break;
- }
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
return 0;
}
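The enable and disable arms of the old switch differed only in the field value, so a single read-modify-write with state == ENABLE as the value is equivalent, and it now also writes the register in the enable case. A simplified stand-in for the REG_SET_FIELD mask/shift mechanics; the bit position below is invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: one enable bit at position 5. */
#define ECC_INT_ENABLE_SHIFT 5
#define ECC_INT_ENABLE_MASK  (1u << ECC_INT_ENABLE_SHIFT)

static uint32_t reg_set_field(uint32_t reg, uint32_t mask,
			      unsigned int shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t cntl = 0xffffffff;
	int enable = 0; /* AMDGPU_IRQ_STATE_DISABLE */

	/* One unconditional update replaces the enable/disable switch. */
	cntl = reg_set_field(cntl, ECC_INT_ENABLE_MASK,
			     ECC_INT_ENABLE_SHIFT, enable ? 1 : 0);
	printf("0x%08x\n", cntl);
	return 0;
}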
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 42f4bd250def..da01b524b9f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -280,17 +280,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
- amdgpu_ring_write(ring, ref_and_mask); /* reference */
- amdgpu_ring_write(ring, ref_and_mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ if (ring->me > 1) {
+ amdgpu_asic_flush_hdp(adev, ring);
+ } else {
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, ref_and_mask); /* reference */
+ amdgpu_ring_write(ring, ref_and_mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 581a3bd11481..43ca63fe85ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
- return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- return false;
default:
return true;
}
@@ -722,7 +720,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
- adev->external_rev_id = adev->rev_id + 0x1;
+ if (adev->rev_id == 0)
+ adev->external_rev_id = 0x1;
+ else
+ adev->external_rev_id = adev->rev_id + 0x10;
break;
case IP_VERSION(11, 5, 1):
adev->cg_flags =
@@ -869,10 +870,35 @@ static int soc21_common_suspend(void *handle)
return soc21_common_hw_fini(adev);
}
+static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg1, sol_reg2;
+
+ /* Reset in the following suspend-abort cases:
+ * 1) Only the dGPU side is reset (APUs are skipped).
+ * 2) The S3 suspend was aborted and the TOS is still active.
+ */
+ if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
+ !adev->suspend_complete) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+ msleep(100);
+ sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+ return (sol_reg1 != sol_reg2);
+ }
+
+ return false;
+}
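The detection works by sampling the MP0 sign-of-life register twice, 100 ms apart: if an S3 entry was aborted while the PSP TOS kept running, the value keeps changing, and the inequality tells the driver to reset before re-initializing. A sketch of the pattern with a stubbed register read; the stub values and timing are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub: in the driver this is RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81). */
static uint32_t read_sol_reg(void)
{
	static uint32_t fake = 0x10;
	return fake += 3;	/* pretend the TOS keeps updating it */
}

static bool need_reset_on_resume(bool is_apu, bool in_s3, bool suspend_done)
{
	if (is_apu || !in_s3 || suspend_done)
		return false;

	uint32_t a = read_sol_reg();
	/* the real code sleeps 100 ms between the two samples */
	uint32_t b = read_sol_reg();

	return a != b;	/* still moving: suspend was aborted, reset first */
}

int main(void)
{
	printf("%d\n", need_reset_on_resume(false, true, false)); /* 1 */
	return 0;
}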
+
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc21_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend aborted, resetting...");
+ soc21_asic_reset(adev);
+ }
+
return soc21_common_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
index 84368cf1e175..bd57896ab85d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
@@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
+ ring->wptr = 0;
+
data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
index 769eb8f7bb3c..09315dd5a1ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -144,6 +144,12 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
}
+ /* setup collaborate mode */
+ vpe_v6_1_set_collaborate_mode(vpe, true);
+ /* setup DPM */
+ if (amdgpu_vpe_configure_dpm(vpe))
+ dev_warn(adev->dev, "VPE failed to enable DPM\n");
+
/*
* For VPE 6.1.1, we still only need to add the master's offset; the PSP applies it to the slave as well.
* Instance 0 is used as the master here.
@@ -159,11 +165,7 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
adev->vpe.cmdbuf_cpu_addr[0] = f32_offset;
adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
- amdgpu_vpe_psp_update_sram(adev);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
-
- return 0;
+ return amdgpu_vpe_psp_update_sram(adev);
}
vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
@@ -196,8 +198,6 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
}
vpe_v6_1_halt(vpe, false);
- vpe_v6_1_set_collaborate_mode(vpe, true);
- amdgpu_vpe_configure_dpm(vpe);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f9631f4b1a02..1e6cc0bfc432 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -779,8 +779,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user
*/
- pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
- args->num_of_nodes), GFP_KERNEL);
+ pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
+ GFP_KERNEL);
if (!pa)
return -ENOMEM;
@@ -1139,7 +1139,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_unlock;
}
offset = dev->adev->rmmio_remap.bus_addr;
- if (!offset) {
+ if (!offset || (PAGE_SIZE > 4096)) {
err = -ENOMEM;
goto err_unlock;
}
@@ -2307,7 +2307,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
return -EINVAL;
}
offset = pdd->dev->adev->rmmio_remap.bus_addr;
- if (!offset) {
+ if (!offset || (PAGE_SIZE > 4096)) {
pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
return -ENOMEM;
}
@@ -3349,6 +3349,9 @@ static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
+ if (PAGE_SIZE > 4096)
+ return -EINVAL;
+
address = dev->adev->rmmio_remap.bus_addr;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 041ec3de55e7..719d6d365e15 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
struct kfd_node *node;
int i;
- int count;
if (!kfd->init_complete)
return;
@@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
- count = ++kfd_locked;
- mutex_unlock(&kfd_processes_mutex);
-
/* For the first KFD device, suspend all KFD processes */
- if (count == 1)
+ if (++kfd_locked == 1)
kfd_suspend_all_processes();
+ mutex_unlock(&kfd_processes_mutex);
}
for (i = 0; i < kfd->num_nodes; i++) {
@@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
- int ret, count, i;
+ int ret, i;
if (!kfd->init_complete)
return 0;
@@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
mutex_lock(&kfd_processes_mutex);
- count = --kfd_locked;
- mutex_unlock(&kfd_processes_mutex);
-
- WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
- if (count == 0)
+ if (--kfd_locked == 0)
ret = kfd_resume_all_processes();
+ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+ mutex_unlock(&kfd_processes_mutex);
}
return ret;
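Moving the unlock after the counter test closes the window in which another caller could observe kfd_locked already bumped while the processes had not actually been suspended yet; the count and its side effect are now atomic with respect to the mutex. A small pthread sketch of the same first-suspender/last-resumer pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int locked_count;

static void suspend_all(void) { puts("suspend all processes"); }
static void resume_all(void)  { puts("resume all processes"); }

static void kfd_suspend(void)
{
	pthread_mutex_lock(&lock);
	/* Count and action happen under one hold: no window in between. */
	if (++locked_count == 1)
		suspend_all();
	pthread_mutex_unlock(&lock);
}

static void kfd_resume(void)
{
	pthread_mutex_lock(&lock);
	if (--locked_count == 0)
		resume_all();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	kfd_suspend();	/* first device: suspends everything */
	kfd_suspend();	/* second device: counter only */
	kfd_resume();
	kfd_resume();	/* last device: resumes everything */
	return 0;
}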
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f4d395e38683..0b655555e167 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2001,6 +2001,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
+ kfd_hws_hang(dqm);
return -ETIME;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index bdc01ca9609a..5c8d81bfce7a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -509,10 +509,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
+ r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
+ if (r) {
+ dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
+ return -ENOSPC;
+ }
+
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
- return r;
+ goto out;
}
ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
@@ -545,6 +554,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
svm_range_vram_node_free(prange);
}
+out:
+ amdgpu_amdkfd_unreserve_mem_limit(node->adev,
+ prange->npages * PAGE_SIZE,
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+ node->xcp ? node->xcp->id : 0);
return r < 0 ? r : 0;
}
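The migration now reserves its VRAM budget before allocating and releases it on every exit path through the shared out label, whether the allocation or the copy failed, so the accounting cannot leak. A standalone sketch of that reserve/act/unreserve shape; the names and budget model are illustrative, not the KFD API:

#include <stdio.h>

static long budget = 2;		/* pretend two pages of VRAM budget */

static int reserve(long n)	{ if (budget < n) return -1; budget -= n; return 0; }
static void unreserve(long n)	{ budget += n; }
static int do_migrate(long n)	{ return n > 1 ? -1 : 0; /* fake failure */ }

static int migrate(long npages)
{
	int r = reserve(npages);
	if (r)
		return r;	/* -ENOSPC in the driver */

	r = do_migrate(npages);
	/* out: the reservation is dropped on success and failure alike */
	unreserve(npages);
	return r;
}

int main(void)
{
	printf("%d (budget %ld)\n", migrate(1), budget);
	printf("%d (budget %ld)\n", migrate(2), budget);
	return 0;
}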
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 717a60d7a4ea..451bb058cc62 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
- return ERR_PTR(-EINVAL);
+ process = ERR_PTR(-EINVAL);
+ goto out;
}
/* A prior open of /dev/kfd could have already created the process. */
@@ -829,6 +829,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
if (process) {
pr_debug("Process already found\n");
} else {
+ /* If the process just called exec(3), it is possible that the
+ * cleanup of the kfd_process (following the release of the mm
+ * of the old process image) is still in the cleanup work queue.
+ * Make sure to drain any job before trying to recreate any
+ * resource for this process.
+ */
+ flush_workqueue(kfd_process_wq);
+
process = create_process(thread);
if (IS_ERR(process))
goto out;
@@ -1922,6 +1930,8 @@ static int signal_eviction_fence(struct kfd_process *p)
rcu_read_lock();
ef = dma_fence_get_rcu_safe(&p->ef);
rcu_read_unlock();
+ if (!ef)
+ return -EINVAL;
ret = dma_fence_signal(ef);
dma_fence_put(ef);
@@ -1949,10 +1959,9 @@ static void evict_process_worker(struct work_struct *work)
* they are responsible stopping the queues and scheduling
* the restore work.
*/
- if (!signal_eviction_fence(p))
- queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- else
+ if (signal_eviction_fence(p) ||
+ mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
@@ -2011,9 +2020,9 @@ static void restore_process_worker(struct work_struct *work)
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
- ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
- WARN(!ret, "reschedule restore work failed\n");
+ if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+ kfd_process_restore_queues(p);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f0f7f48af413..386875e6eb96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -3426,7 +3426,7 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
- return r;
+ return 0;
}
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c51f131eaa2f..bc9eb847ecfe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1997,9 +1997,8 @@ int kfd_topology_add_device(struct kfd_node *gpu)
HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
- /* On multi-partition nodes, node id = location_id[31:28] */
- if (gpu->kfd->num_nodes > 1)
- dev->node_props.location_id |= (dev->gpu->node_id << 28);
+ if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3))
+ dev->node_props.location_id |= dev->gpu->node_id;
dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 71d2d44681b2..d6e71aa808d8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -3026,6 +3029,7 @@ static int dm_resume(void *handle)
dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
}
+ dm_new_crtc_state->base.color_mgmt_changed = true;
}
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
@@ -3044,6 +3048,10 @@ static int dm_resume(void *handle)
/* Do MST topology probing after resuming the cached state */
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@@ -4529,15 +4537,18 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* Determine whether to enable Replay support by default. */
if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
- case IP_VERSION(3, 1, 4):
- case IP_VERSION(3, 1, 5):
- case IP_VERSION(3, 1, 6):
- case IP_VERSION(3, 2, 0):
- case IP_VERSION(3, 2, 1):
- case IP_VERSION(3, 5, 0):
- case IP_VERSION(3, 5, 1):
- replay_feature_enabled = true;
- break;
+/*
+ * Disabled by default due to https://gitlab.freedesktop.org/drm/amd/-/issues/3344
+ * case IP_VERSION(3, 1, 4):
+ * case IP_VERSION(3, 1, 5):
+ * case IP_VERSION(3, 1, 6):
+ * case IP_VERSION(3, 2, 0):
+ * case IP_VERSION(3, 2, 1):
+ * case IP_VERSION(3, 5, 0):
+ * case IP_VERSION(3, 5, 1):
+ * replay_feature_enabled = true;
+ * break;
+ */
default:
replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
break;
@@ -4820,9 +4831,11 @@ static int dm_init_microcode(struct amdgpu_device *adev)
fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
case IP_VERSION(3, 5, 0):
- case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_35_DMUB;
break;
+ case IP_VERSION(3, 5, 1):
+ fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return 0;
@@ -5921,6 +5934,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
&aconnector->base.probed_modes :
&aconnector->base.modes;
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return NULL;
+
if (aconnector->freesync_vid_base.clock != 0)
return &aconnector->freesync_vid_base;
@@ -6306,19 +6322,16 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
- if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
//
// Decide whether the stream supports VSC SDP colorimetry
// before building the VSC info packet.
//
- stream->use_vsc_sdp_for_colorimetry = false;
- if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- stream->use_vsc_sdp_for_colorimetry =
- aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
- } else {
- if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
- stream->use_vsc_sdp_for_colorimetry = true;
- }
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
@@ -8762,10 +8775,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
+notify:
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
-notify:
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index eee4945653e2..c7715a17f388 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1495,7 +1495,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1596,7 +1598,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1681,7 +1685,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1780,7 +1786,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1865,7 +1873,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -1964,7 +1974,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2045,7 +2057,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2141,7 +2155,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2220,7 +2236,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2276,7 +2294,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2347,7 +2367,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
@@ -2418,7 +2440,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
- pipe_ctx->stream->link == aconnector->dc_link)
+ pipe_ctx->stream->link == aconnector->dc_link &&
+ pipe_ctx->stream->sink &&
+ pipe_ctx->stream->sink == aconnector->dc_sink)
break;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 941e96f100f4..cb31a699c662 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1219,8 +1219,10 @@ static bool is_dsc_need_re_compute(
if (dc_link->type != dc_connection_mst_branch)
return false;
- if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
- dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
+ /* add a check for older MST DSC with no virtual DPCDs */
+ if (needs_dsc_aux_workaround(dc_link) &&
+ (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
+ dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)))
return false;
for (i = 0; i < MAX_PIPES; i++)
@@ -1240,7 +1242,15 @@ static bool is_dsc_need_re_compute(
continue;
aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
- if (!aconnector)
+ if (!aconnector || !aconnector->dsc_aux)
+ continue;
+
+ /*
+ * Check whether cached virtual MST DSC caps are available and DSC is
+ * supported, as specified in their virtual DPCD registers.
+ */
+ if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported ||
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
continue;
stream_on_link[new_stream_on_link_num] = aconnector;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 16e72d623630..08c494a7a21b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
-
- return drm_add_modes_noedid(connector, dev->mode_config.max_width,
- dev->mode_config.max_height);
+ /* Maximum resolution supported by DWB */
+ return drm_add_modes_noedid(connector, 3840, 2160);
}
static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 05f392501c0a..ab31643b1096 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2948,6 +2948,7 @@ static enum bp_result construct_integrated_info(
result = get_integrated_info_v2_1(bp, info);
break;
case 2:
+ case 3:
result = get_integrated_info_v2_2(bp, info);
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 644da4637320..5506cf9b3672 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
*/
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
if (safe_to_lower) {
+ if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
+ dcn315_smu_set_dtbclk(clk_mgr, false);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = dcn315_get_active_display_cnt_wa(dc, context);
@@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
} else {
+ if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
+ dcn315_smu_set_dtbclk(clk_mgr, true);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
/* check that we're not already in D0 */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
union display_idle_optimization_u idle_info = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 12f3e8aa46d8..6ad4f4efec5d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, context, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, context, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index bec252e1dd27..e506e4f969ca 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -712,8 +712,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
* since we calculate mode support based on softmax being the max UCLK
* frequency.
*/
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ if (dc->debug.disable_dc_mode_overwrite) {
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+ } else
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
}
@@ -746,8 +750,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
- !dc->work_arounds.clock_update_disable_mask.uclk)
+ !dc->work_arounds.clock_update_disable_mask.uclk) {
+ if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite)
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
+ max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)));
+
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
+ }
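With DC-mode overwrite disabled, the hard max must first be lifted to whichever is larger, the DC-mode soft max or the requested DRAM clock, so that the hard min written next is always legal. A worked example of the clamp, with khz_to_mhz_ceil approximated inline:

#include <stdio.h>

static int khz_to_mhz_ceil(int khz) { return (khz + 999) / 1000; }
static int max_int(int a, int b)    { return a > b ? a : b; }

int main(void)
{
	int softmax_mhz = 1000;
	int dramclk_khz = 1250000;	/* request above the soft max */

	int hard_max = max_int(softmax_mhz, khz_to_mhz_ceil(dramclk_khz));
	int hard_min = khz_to_mhz_ceil(dramclk_khz);

	/* hard max is lifted to 1250 MHz so the 1250 MHz hard min is legal */
	printf("hard_max=%d MHz hard_min=%d MHz\n", hard_max, hard_min);
	return 0;
}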
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 101fe96287cb..d9c5692c86c2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -73,6 +73,12 @@
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+#define regCLK5_0_CLK5_spll_field_8 0x464b
+#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
#define REG(reg_name) \
@@ -411,6 +417,17 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
{
}
+static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t ssc_enable;
+
+ REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
static void init_clk_states(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
@@ -428,7 +445,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
init_clk_states(clk_mgr);
+
+ // Adjust the dp_dto reference clock if SSC is enabled; otherwise use dprefclk.
+ if (dcn35_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@@ -517,6 +543,28 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
+static struct dcn35_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
+static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t clock_source;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ // If it's DFS mode, clock_source is 0.
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
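With ss_divider = 1000, a table entry of 375 encodes a 0.375 % downspread for the selected clock source. A worked example of the lookup and an assumed downspread adjustment; the real driver computes the final value in dce_adjust_dp_ref_freq_for_ss(), so the formula below is for illustration only:

#include <stdio.h>

int main(void)
{
	const unsigned int ss_divider = 1000;
	const unsigned int ss_percentage[5] = { 0, 0, 375, 375, 375 };
	unsigned int clock_source = 2;	/* from CLK2_BYPASS_SEL */
	unsigned int dprefclk_khz = 730000;

	/* 375 / 1000 = 0.375 % spread */
	double pct = (double)ss_percentage[clock_source] / ss_divider;

	/* Assumed downspread-style adjustment, for illustration only. */
	double adjusted = dprefclk_khz * (1.0 - pct / 100.0);

	printf("spread = %.3f %%, adjusted dprefclk ~= %.0f kHz\n",
	       pct, adjusted);
	return 0;
}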
+
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
@@ -1061,6 +1109,8 @@ void dcn35_clk_mgr_construct(
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
+ dcn35_read_ss_info_from_lut(&clk_mgr->base);
+
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 03b554e912a2..d68c83e40d4d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1801,6 +1801,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 5cc7f8da209c..61986e5cb491 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -436,6 +436,15 @@ bool dc_state_add_plane(
goto out;
}
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+ /* ODM combine could prevent us from supporting more planes, so
+ * reset the ODM slice count back to 1 once all planes have been
+ * removed, to maximize the number of planes supported when new
+ * planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
otg_master_pipe = resource_get_otg_master_for_stream(
&state->res_ctx, stream);
if (otg_master_pipe)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 970644b695cd..b5e0289d2fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+ // Apply the spread-spectrum (SSC) dpref clock for eDP only.
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
+ && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
+ && encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
unsigned int modulo_hz = 0;
unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
- dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
-
if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
clock_hz = REG_READ(PHASE[inst]);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 5b7ad38f85e0..65e45a0b4ff3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
x),
25));
+ // If y rounds up to an integer, carry it over into x.
+ if (y >> 25) {
+ x += 1;
+ y = 0;
+ }
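x holds the integer part of the throttled VCP size and y a 25-bit fixed-point fraction; rounding can push y to exactly 1.0 (bit 25 set), which must be carried into x rather than written out as an out-of-range fraction. A runnable demonstration of the carry with plain integers:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 25

int main(void)
{
	uint32_t x = 3;
	uint64_t y = 1ull << FRAC_BITS;	/* rounded-up fraction == 1.0 */

	if (y >> FRAC_BITS) {	/* fraction overflowed the 25-bit field */
		x += 1;
		y = 0;
	}
	printf("x=%u y=%llu\n", x, (unsigned long long)y); /* x=4 y=0 */
	return 0;
}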
+
switch (stream_encoder_inst) {
case 0:
REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index e224a028d68a..8a0460e86309 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
- enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
-
- enc10->base.features = *enc_features;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
enc10->base.transmitter = init_data->transmitter;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index 81e349d5835b..da94e5309fba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index deb6d162a2d5..7307b7b8d8ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .dispclk_dppclk_vco_speed_mhz = 2400.0,
.num_chans = 4,
.dummy_pstate_latency_us = 10.0
};
@@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .dispclk_dppclk_vco_speed_mhz = 2500.0,
};
void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 21e0eef3269b..53e40d3c48d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 0,
+ .do_urgent_latency_adjustment = 1,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index a5560b3fc39b..9067ca78f851 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -638,22 +638,43 @@ void dcn35_power_down_on_boot(struct dc *dc)
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
- struct dc_link *edp_links[MAX_NUM_EDP];
- int i, edp_num;
if (dc->debug.dmcub_emulation)
return true;
if (enable) {
- dc_get_edp_links(dc, edp_links, &edp_num);
- if (edp_num == 0 || edp_num > 1)
- return false;
+ uint32_t num_active_edp = 0;
+ int i;
for (i = 0; i < dc->current_state->stream_count; ++i) {
struct dc_stream_state *stream = dc->current_state->streams[i];
+ struct dc_link *link = stream->link;
+ bool is_psr = link && !link->panel_config.psr.disable_psr &&
+ (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
+
+ /* Ignore streams that are disabled. */
+ if (stream->dpms_off)
+ continue;
+
+ /* Active external displays block idle optimizations. */
+ if (!dc_is_embedded_signal(stream->signal))
+ return false;
+
+ /* If the link is not PWRSEQ0, we can't enter idle optimizations. */
+ if (link && link->link_index != 0)
+ return false;
- if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal))
+ /* Check for panel power features required for idle optimizations. */
+ if (!is_psr && !is_replay)
return false;
+
+ num_active_edp += 1;
}
+
+ /* If more than one eDP is active, disallow idle optimizations. */
+ if (num_active_edp > 1)
+ return false;
}
// TODO: review other cases when idle optimization is allowed
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 5491b707cec8..5a965c26bf20 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -270,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
/* Error check whether requested and allocated are equal */
req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+ if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
__func__, link->link_index);
}
@@ -341,6 +341,14 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
ret = true;
init_usb4_bw_struct(link);
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+
+ /*
+ * During DP tunnel creation, the CM preallocates BW and reduces the
+ * estimated BW of other DPIAs. The CM releases the preallocation only
+ * when allocation is complete. Do a zero allocation to make the CM
+ * release the preallocation and update the estimated BW correctly for all DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index f07a4c7e48bc..52eab8fccb7f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index a2387cea1af9..622214b365a2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2449,6 +2449,7 @@ static bool dcn20_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f09b9d49297e..bbd0169010c2 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -4261,6 +4261,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
}
}
+ /*
+ * If gpu_od is the only member in the list, that means gpu_od is an
+ * empty directory, so remove it.
+ */
+ if (list_is_singular(&adev->pm.od_kobj_list))
+ goto err_out;
+
return 0;
err_out:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 246b211b1e85..65333141b1c1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -735,7 +735,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -1966,10 +1966,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
+static int smu_reset_mp1_state(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (!adev->in_runpm && !adev->in_suspend &&
+ !amdgpu_in_reset(adev) &&
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(adev))
+ ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+
+ return ret;
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1987,7 +2002,15 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- return smu_smc_hw_cleanup(smu);
+ ret = smu_smc_hw_cleanup(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_reset_mp1_state(smu);
+ if (ret)
+ return ret;
+
+ return 0;
}
static void smu_late_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index a870bdd49a4e..1fa81575788c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -424,6 +424,7 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
+ SMU_BACO_STATE_NONE,
};
struct smu_baco_context {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 5bb7a63c0602..97522c085258 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -144,6 +144,37 @@ typedef struct {
uint32_t MaxGfxClk;
} DpmClocks_t;
+//Freq in MHz
+//Voltage in milli volts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+ uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+ uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t Vcn0ClkLevelsEnabled; //Applies to both Vclk0 and Dclk0
+ uint8_t Vcn1ClkLevelsEnabled; //Applies to both Vclk1 and Dclk1
+ uint8_t VpeClkLevelsEnabled;
+ uint8_t NumMemPstatesEnabled;
+ uint8_t NumFclkLevelsEnabled;
+ uint8_t spare;
+
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t_v14_0_1;
+
typedef struct {
uint16_t CoreFrequency[16]; //Target core frequency [MHz]
uint16_t CorePower[16]; //CAC calculated core power [mW]
@@ -224,7 +255,7 @@ typedef enum {
#define TABLE_CUSTOM_DPM 2 // Called by Driver
#define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS
#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS
-#define TABLE_SPARE0 5 // Unused
+#define TABLE_MOMENTARY_PM 5 // Called by Tools
#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
#define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF
#define TABLE_COUNT 8
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
index 356e0f57a426..ddb625860083 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
@@ -42,7 +42,7 @@
#define FEATURE_EDC_BIT 7
#define FEATURE_PLL_POWER_DOWN_BIT 8
#define FEATURE_VDDOFF_BIT 9
-#define FEATURE_VCN_DPM_BIT 10
+#define FEATURE_VCN_DPM_BIT 10 /* this is for both VCN0 and VCN1 */
#define FEATURE_DS_MPM_BIT 11
#define FEATURE_FCLK_DPM_BIT 12
#define FEATURE_SOCCLK_DPM_BIT 13
@@ -56,9 +56,9 @@
#define FEATURE_DS_GFXCLK_BIT 21
#define FEATURE_DS_SOCCLK_BIT 22
#define FEATURE_DS_LCLK_BIT 23
-#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks
+#define FEATURE_LOW_POWER_DCNCLKS_BIT 24
#define FEATURE_DS_SHUBCLK_BIT 25
-#define FEATURE_SPARE0_BIT 26 //SPARE
+#define FEATURE_RESERVED0_BIT 26
#define FEATURE_ZSTATES_BIT 27
#define FEATURE_IOMMUL2_PG_BIT 28
#define FEATURE_DS_FCLK_BIT 29
@@ -66,8 +66,8 @@
#define FEATURE_DS_MP1CLK_BIT 31
#define FEATURE_WHISPER_MODE_BIT 32
#define FEATURE_SMU_LOW_POWER_BIT 33
-#define FEATURE_SMART_L3_RINSER_BIT 34
-#define FEATURE_SPARE1_BIT 35 //SPARE
+#define FEATURE_RESERVED1_BIT 34 /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */
+#define FEATURE_GFX_DEM_BIT 35 /* v14_0_0 SPARE; v14_0_1 GFX_DEM */
#define FEATURE_PSI_BIT 36
#define FEATURE_PROCHOT_BIT 37
#define FEATURE_CPUOFF_BIT 38
@@ -77,11 +77,11 @@
#define FEATURE_PERF_LIMIT_BIT 42
#define FEATURE_CORE_DLDO_BIT 43
#define FEATURE_DVO_BIT 44
-#define FEATURE_DS_VCN_BIT 45
+#define FEATURE_DS_VCN_BIT 45 /* v14_0_1 this is for both VCN0 and VCN1 */
#define FEATURE_CPPC_BIT 46
#define FEATURE_CPPC_PREFERRED_CORES 47
#define FEATURE_DF_CSTATES_BIT 48
-#define FEATURE_SPARE2_BIT 49 //SPARE
+#define FEATURE_FAST_PSTATE_CLDO_BIT 49 /* v14_0_0 SPARE */
#define FEATURE_ATHUB_PG_BIT 50
#define FEATURE_VDDOFF_ECO_BIT 51
#define FEATURE_ZSTATES_ECO_BIT 52
@@ -93,8 +93,8 @@
#define FEATURE_DS_IPUCLK_BIT 58
#define FEATURE_DS_VPECLK_BIT 59
#define FEATURE_VPE_DPM_BIT 60
-#define FEATURE_SPARE_61 61
-#define FEATURE_FP_DIDT 62
+#define FEATURE_SMART_L3_RINSER_BIT 61 /* v14_0_0 SPARE */
+#define FEATURE_PCC_BIT 62 /* v14_0_0 FP_DIDT; v14_0_1 PCC_BIT */
#define NUM_FEATURES 63
// Firmware Header/Footer
@@ -151,6 +151,43 @@ typedef struct {
// MP1_EXT_SCRATCH7 = RTOS Current Job
} FwStatus_t;
+typedef struct {
+ // MP1_EXT_SCRATCH0
+ uint32_t DpmHandlerID : 8;
+ uint32_t ActivityMonitorID : 8;
+ uint32_t DpmTimerID : 8;
+ uint32_t DpmHubID : 4;
+ uint32_t DpmHubTask : 4;
+ // MP1_EXT_SCRATCH1
+ uint32_t CclkSyncStatus : 8;
+ uint32_t ZstateStatus : 4;
+ uint32_t Cpu1VddOff : 4;
+ uint32_t DstateFun : 4;
+ uint32_t DstateDev : 4;
+ uint32_t GfxOffStatus : 2;
+ uint32_t Cpu0Off : 2;
+ uint32_t Cpu1Off : 2;
+ uint32_t Cpu0VddOff : 2;
+ // MP1_EXT_SCRATCH2
+ uint32_t P2JobHandler :32;
+ // MP1_EXT_SCRATCH3
+ uint32_t PostCode :32;
+ // MP1_EXT_SCRATCH4
+ uint32_t MsgPortBusy :15;
+ uint32_t RsmuPmiP1Pending : 1;
+ uint32_t RsmuPmiP2PendingCnt : 8;
+ uint32_t DfCstateExitPending : 1;
+ uint32_t Pc6EntryPending : 1;
+ uint32_t Pc6ExitPending : 1;
+ uint32_t WarmResetPending : 1;
+ uint32_t Mp0ClkPending : 1;
+ uint32_t InWhisperMode : 1;
+ uint32_t spare2 : 2;
+ // MP1_EXT_SCRATCH5
+ uint32_t IdleMask :32;
+ // MP1_EXT_SCRATCH6 = RTOS threads' status
+ // MP1_EXT_SCRATCH7 = RTOS Current Job
+} FwStatus_t_v14_0_1;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
index ca7ce4251482..c4dc5881d8df 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
@@ -72,23 +72,19 @@
#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set soft min for FCLK
#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
-
#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
-
-#define PPSMC_MSG_spare_0x17 0x17
-#define PPSMC_MSG_spare_0x18 0x18
+#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
+#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
-
#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
-#define PPSMC_MSG_spare_0x20 0x20
+#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
-
#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity
@@ -99,8 +95,8 @@
#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
-#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
-#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StutterOff mmHub PgEn
#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
@@ -110,7 +106,9 @@
#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
#define PPSMC_MSG_SetSoftMaxVpe 0x36 ///<
#define PPSMC_MSG_SetSoftMinVpe 0x37 ///<
-#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages
+#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
+#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
/** @}*/
/**
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 3f7463c1c1a9..4af1985ae446 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -27,6 +27,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
#define FEATURE_MASK(feature) (1ULL << feature)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 9c03296f92cd..67117ced7c6a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_PrepareMp1ForUnload,
+ 0x55, NULL);
+
+ if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+ ret = smu_v13_0_disable_pmfw_state(smu);
+
break;
default:
/* Ignore others */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index bb98156b2fa1..4abfcd32747d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && !adev->in_s0ix)
+ if (!en && adev->in_s4) {
+ /* Add a GFX reset as a workaround just before sending the
+ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 3957af057d54..c977ebe88001 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -2294,6 +2294,17 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return sizeof(*gpu_metrics);
}
+static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ pci_write_config_dword(adev->pdev, i * 4,
+ adev->pdev->saved_config_space[i]);
+ pci_restore_msi_state(adev->pdev);
+}
+
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -2315,6 +2326,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
+ /* Certain platforms have switches which assign virtual BAR values to
+ * devices. The OS uses the virtual BAR values, while the device behind
+ * the switch is assigned another BAR value. When the device's config
+ * space registers are queried, the switch returns the virtual BAR
+ * values. When a mode-2 reset is performed, the switch is unaware of
+ * it and will continue to return the same virtual values to the OS.
+ * This affects the pci_restore_config_space() API, as it doesn't
+ * write the saved value if the current value read from config space
+ * is the same as what was saved. As a workaround, make sure the
+ * config space is always restored.
+ */
+ if (!(adev->flags & AMD_IS_APU))
+ smu_v13_0_6_restore_pci_config(smu);
+
dev_dbg(smu->adev->dev, "wait for reset ack\n");
do {
ret = smu_cmn_wait_for_response(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 9e39f99154f9..07a65e005785 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -234,7 +234,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
case IP_VERSION(14, 0, 1):
- smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index d6de6d97286c..63399c00cc28 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -161,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -171,7 +171,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+ smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL);
if (!smu_table->clocks_table)
goto err1_out;
@@ -593,6 +593,60 @@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
return ret;
}
+static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t dpm_level,
+ uint32_t *freq)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks0[dpm_level];
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks0[dpm_level];
+ break;
+ case SMU_VCLK1:
+ if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks1[dpm_level];
+ break;
+ case SMU_DCLK1:
+ if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks1[dpm_level];
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumMemPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->MemPstateTable[dpm_level].MemClk;
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumFclkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->FclkClocks_Freq[dpm_level];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t dpm_level,
@@ -637,6 +691,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
return 0;
}
+static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t dpm_level,
+ uint32_t *freq)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ return smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ return smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+
+ return 0;
+}
+
static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
enum smu_clk_type clk_type)
{
@@ -657,6 +724,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
feature_id = SMU_FEATURE_VCN_DPM_BIT;
break;
default:
@@ -666,6 +735,126 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
return smu_cmn_feature_is_enabled(smu, feature_id);
}
+static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+ uint32_t clock_limit;
+ uint32_t max_dpm_level, min_dpm_level;
+ int ret = 0;
+
+ if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ case SMU_VCLK1:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ case SMU_DCLK1:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* clock in MHz units */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ if (max) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *max = clk_table->MaxGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ max_dpm_level = 0;
+ break;
+ case SMU_SOCCLK:
+ max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ if (ret)
+ goto failed;
+ }
+ }
+
+ if (min) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *min = clk_table->MinGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
+ break;
+ case SMU_FCLK:
+ min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
+ break;
+ case SMU_SOCCLK:
+ min_dpm_level = 0;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ min_dpm_level = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ if (ret)
+ goto failed;
+ }
+ }
+
+failed:
+ return ret;
+}
+
static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min,
@@ -736,7 +925,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
if (ret)
goto failed;
}
@@ -768,7 +957,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
if (ret)
goto failed;
}
@@ -778,6 +967,19 @@ failed:
return ret;
}
+static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ return smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ return smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+ return 0;
+}
+
static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
@@ -811,6 +1013,37 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
}
+static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *count)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ *count = clk_table->NumSocClkLevelsEnabled;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ *count = clk_table->Vcn0ClkLevelsEnabled;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ *count = clk_table->Vcn1ClkLevelsEnabled;
+ break;
+ case SMU_MCLK:
+ *count = clk_table->NumMemPstatesEnabled;
+ break;
+ case SMU_FCLK:
+ *count = clk_table->NumFclkLevelsEnabled;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *count)
@@ -840,6 +1073,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
return 0;
}
+static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *count)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ return smu_v14_0_0_get_dpm_level_count(smu, clk_type, count);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ return smu_v14_0_1_get_dpm_level_count(smu, clk_type, count);
+
+ return 0;
+}
+
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -866,18 +1111,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_VCLK:
case SMU_DCLK:
+ case SMU_VCLK1:
+ case SMU_DCLK1:
case SMU_MCLK:
case SMU_FCLK:
ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
break;
- ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count);
+ ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count);
if (ret)
break;
for (i = 0; i < count; i++) {
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
break;
@@ -940,8 +1187,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
break;
case SMU_VCLK:
case SMU_DCLK:
- msg_set_min = SMU_MSG_SetHardMinVcn;
- msg_set_max = SMU_MSG_SetSoftMaxVcn;
+ msg_set_min = SMU_MSG_SetHardMinVcn0;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn0;
+ break;
+ case SMU_VCLK1:
+ case SMU_DCLK1:
+ msg_set_min = SMU_MSG_SetHardMinVcn1;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn1;
break;
default:
return -EINVAL;
@@ -971,11 +1223,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
case SMU_VCLK:
case SMU_DCLK:
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
break;
- ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
break;
@@ -1000,25 +1252,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
sclk_min = sclk_max;
fclk_min = fclk_max;
socclk_min = socclk_max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
sclk_max = sclk_min;
fclk_max = fclk_min;
socclk_max = socclk_min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
- smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
+ smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -1067,6 +1319,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
return ret;
}
+static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+ smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
+ smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1079,6 +1343,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
return 0;
}
+static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ return smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ return smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu);
+
+ return 0;
+}
+
static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
bool enable)
{
@@ -1095,6 +1369,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+ uint8_t idx;
+
+ /* Only the SOC and VPE clock information is copied to provide the VPE DPM settings. */
+ for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
+ clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx] : 0;
+ clock_table->SocClocks[idx].Vol = 0;
+ }
+
+ for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
+ clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx] : 0;
+ clock_table->VPEClocks[idx].Vol = 0;
+ }
+
+ return 0;
+}
+
static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1114,6 +1407,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *
return 0;
}
+static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+ return smu_14_0_0_get_dpm_table(smu, clock_table);
+ else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+ return smu_14_0_1_get_dpm_table(smu, clock_table);
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1135,16 +1438,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.gfx_off_control = smu_v14_0_gfx_off_control,
.mode2_reset = smu_v14_0_0_mode2_reset,
- .get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq,
+ .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
.print_clk_levels = smu_v14_0_0_print_clk_levels,
.force_clk_levels = smu_v14_0_0_force_clk_levels,
.set_performance_level = smu_v14_0_0_set_performance_level,
- .set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters,
+ .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
- .get_dpm_clock_table = smu_14_0_0_get_dpm_table,
+ .get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index ebb6d8ebd44e..1e9259416980 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
+ u32 i = 0;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
+ if (++i > 200)
+ break;
}
}
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 871e4e2129d6..0683a129b362 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
+ /* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
@@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@@ -875,6 +875,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index b0516505f7ae..4d2df7f64dc5 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -2940,7 +2940,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
dev->mode_config.max_width,
dev->mode_config.max_height);
else
- drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe",
+ drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe\n",
connector->base.id, connector->name);
}
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e440f458b663..93337543aac3 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -224,8 +224,8 @@ __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
- drm_format_conv_state_copy(&shadow_plane_state->fmtcnv_state,
- &new_shadow_plane_state->fmtcnv_state);
+ drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
+ &shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 734412aae94d..a9bf426f69b3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -164,26 +164,6 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.eco_id;
break;
- case ETNAVIV_PARAM_GPU_NN_CORE_COUNT:
- *value = gpu->identity.nn_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE:
- *value = gpu->identity.nn_mad_per_core;
- break;
-
- case ETNAVIV_PARAM_GPU_TP_CORE_COUNT:
- *value = gpu->identity.tp_core_count;
- break;
-
- case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE:
- *value = gpu->identity.on_chip_sram_size;
- break;
-
- case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE:
- *value = gpu->identity.axi_sram_size;
- break;
-
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
@@ -663,8 +643,8 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
/* Disable TX clock gating on affected core revisions. */
if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
- etnaviv_is_model_rev(gpu, GC2000, 0x6203))
+ etnaviv_is_model_rev(gpu, GC7000, 0x6202) ||
+ etnaviv_is_model_rev(gpu, GC7000, 0x6203))
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
/* Disable SE and RA clock gating on affected core revisions. */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 7d5e9158e13c..197e0037732e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -54,18 +54,6 @@ struct etnaviv_chip_identity {
/* Number of Neural Network cores. */
u32 nn_core_count;
- /* Number of MAD units per Neural Network core. */
- u32 nn_mad_per_core;
-
- /* Number of Tensor Processing cores. */
- u32 tp_core_count;
-
- /* Size in bytes of the SRAM inside the NPU. */
- u32 on_chip_sram_size;
-
- /* Size in bytes of the SRAM across the AXI bus. */
- u32 axi_sram_size;
-
/* Size of the vertex cache. */
u32 vertex_cache_size;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index d8e7334de8ce..8665f2658d51 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -17,10 +17,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 128,
.shader_core_count = 1,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -52,11 +48,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 256,
.shader_core_count = 1,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 512,
.pixel_pipes = 1,
@@ -89,10 +80,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -125,10 +112,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -160,11 +143,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
- .nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -197,10 +175,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 1024,
.shader_core_count = 4,
.nn_core_count = 0,
- .nn_mad_per_core = 0,
- .tp_core_count = 0,
- .on_chip_sram_size = 0,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
@@ -233,10 +207,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 8,
- .nn_mad_per_core = 64,
- .tp_core_count = 4,
- .on_chip_sram_size = 524288,
- .axi_sram_size = 1048576,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -269,10 +239,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 6,
- .nn_mad_per_core = 64,
- .tp_core_count = 3,
- .on_chip_sram_size = 262144,
- .axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 4f302cd5e1a6..58fed80c7392 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -34,7 +34,6 @@ gma500_gfx-y += \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
- psb_lid.o \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index dcfcd7b89d4a..6dece8f0e380 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
}
psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
- /* This must occur after the backlight is properly initialised */
- psb_lid_timer_init(dev_priv);
+
return 0;
}
@@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index c5edfa4aa4cc..83c17689c454 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -162,7 +162,6 @@
#define PSB_NUM_VBLANKS 2
#define PSB_WATCHDOG_DELAY (HZ * 2)
-#define PSB_LID_DELAY (HZ / 10)
#define PSB_MAX_BRIGHTNESS 100
@@ -491,11 +490,7 @@ struct drm_psb_private {
/* Hotplug handling */
struct work_struct hotplug_work;
- /* LID-Switch */
- spinlock_t lid_lock;
- struct timer_list lid_timer;
struct psb_intel_opregion opregion;
- u32 lid_last_state;
/* Watchdog */
uint32_t apm_reg;
@@ -591,10 +586,6 @@ struct psb_ops {
int i2c_bus; /* I2C bus identifier for Moorestown */
};
-/* psb_lid.c */
-extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
-extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
-
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
deleted file mode 100644
index 58a7fe392636..000000000000
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
- *
- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- **************************************************************************/
-
-#include <linux/spinlock.h>
-
-#include "psb_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_reg.h"
-
-static void psb_lid_timer_func(struct timer_list *t)
-{
- struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
- struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
- u32 __iomem *lid_state = dev_priv->opregion.lid_state;
- u32 pp_status;
-
- if (readl(lid_state) == dev_priv->lid_last_state)
- goto lid_timer_schedule;
-
- if ((readl(lid_state)) & 0x01) {
- /*lid state is open*/
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0 &&
- (pp_status & PP_SEQUENCE_MASK) != 0);
-
- if (REG_READ(PP_STATUS) & PP_ON) {
- /*FIXME: should be backlight level before*/
- psb_intel_lvds_set_brightness(dev, 100);
- } else {
- DRM_DEBUG("LVDS panel never powered up");
- return;
- }
- } else {
- psb_intel_lvds_set_brightness(dev, 0);
-
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
- }
- dev_priv->lid_last_state = readl(lid_state);
-
-lid_timer_schedule:
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
- if (!timer_pending(lid_timer)) {
- lid_timer->expires = jiffies + PSB_LID_DELAY;
- add_timer(lid_timer);
- }
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_init(struct drm_psb_private *dev_priv)
-{
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
-
- spin_lock_init(&dev_priv->lid_lock);
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
-
- timer_setup(lid_timer, psb_lid_timer_func, 0);
-
- lid_timer->expires = jiffies + PSB_LID_DELAY;
-
- add_timer(lid_timer);
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
-{
- del_timer_sync(&dev_priv->lid_timer);
-}
-
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 07e0c73204f3..ed81e1466c4b 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -76,19 +76,6 @@ struct intel_audio_funcs {
struct intel_crtc_state *crtc_state);
};
-/* DP N/M table */
-#define LC_810M 810000
-#define LC_540M 540000
-#define LC_270M 270000
-#define LC_162M 162000
-
-struct dp_aud_n_m {
- int sample_rate;
- int clock;
- u16 m;
- u16 n;
-};
-
struct hdmi_aud_ncts {
int sample_rate;
int clock;
@@ -96,60 +83,6 @@ struct hdmi_aud_ncts {
int cts;
};
-/* Values according to DP 1.4 Table 2-104 */
-static const struct dp_aud_n_m dp_aud_n_m[] = {
- { 32000, LC_162M, 1024, 10125 },
- { 44100, LC_162M, 784, 5625 },
- { 48000, LC_162M, 512, 3375 },
- { 64000, LC_162M, 2048, 10125 },
- { 88200, LC_162M, 1568, 5625 },
- { 96000, LC_162M, 1024, 3375 },
- { 128000, LC_162M, 4096, 10125 },
- { 176400, LC_162M, 3136, 5625 },
- { 192000, LC_162M, 2048, 3375 },
- { 32000, LC_270M, 1024, 16875 },
- { 44100, LC_270M, 784, 9375 },
- { 48000, LC_270M, 512, 5625 },
- { 64000, LC_270M, 2048, 16875 },
- { 88200, LC_270M, 1568, 9375 },
- { 96000, LC_270M, 1024, 5625 },
- { 128000, LC_270M, 4096, 16875 },
- { 176400, LC_270M, 3136, 9375 },
- { 192000, LC_270M, 2048, 5625 },
- { 32000, LC_540M, 1024, 33750 },
- { 44100, LC_540M, 784, 18750 },
- { 48000, LC_540M, 512, 11250 },
- { 64000, LC_540M, 2048, 33750 },
- { 88200, LC_540M, 1568, 18750 },
- { 96000, LC_540M, 1024, 11250 },
- { 128000, LC_540M, 4096, 33750 },
- { 176400, LC_540M, 3136, 18750 },
- { 192000, LC_540M, 2048, 11250 },
- { 32000, LC_810M, 1024, 50625 },
- { 44100, LC_810M, 784, 28125 },
- { 48000, LC_810M, 512, 16875 },
- { 64000, LC_810M, 2048, 50625 },
- { 88200, LC_810M, 1568, 28125 },
- { 96000, LC_810M, 1024, 16875 },
- { 128000, LC_810M, 4096, 50625 },
- { 176400, LC_810M, 3136, 28125 },
- { 192000, LC_810M, 2048, 16875 },
-};
-
-static const struct dp_aud_n_m *
-audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
- if (rate == dp_aud_n_m[i].sample_rate &&
- crtc_state->port_clock == dp_aud_n_m[i].clock)
- return &dp_aud_n_m[i];
- }
-
- return NULL;
-}
-
static const struct {
int clock;
u32 config;
@@ -387,47 +320,17 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = i915->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- enum port port = encoder->port;
- const struct dp_aud_n_m *nm;
- int rate;
- u32 tmp;
-
- rate = acomp ? acomp->aud_sample_rate[port] : 0;
- nm = audio_config_dp_get_n_m(crtc_state, rate);
- if (nm)
- drm_dbg_kms(&i915->drm, "using Maud %u, Naud %u\n", nm->m,
- nm->n);
- else
- drm_dbg_kms(&i915->drm, "using automatic Maud, Naud\n");
-
- tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder));
- tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- tmp |= AUD_CONFIG_N_VALUE_INDEX;
- if (nm) {
- tmp &= ~AUD_CONFIG_N_MASK;
- tmp |= AUD_CONFIG_N(nm->n);
- tmp |= AUD_CONFIG_N_PROG_ENABLE;
- }
-
- intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp);
-
- tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
- tmp &= ~AUD_CONFIG_M_MASK;
- tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
-
- if (nm) {
- tmp |= nm->m;
- tmp |= AUD_M_CTS_M_VALUE_INDEX;
- tmp |= AUD_M_CTS_M_PROG_ENABLE;
- }
+ /* Enable time stamps. Let HW calculate Maud/Naud values */
+ intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder),
+ AUD_CONFIG_N_VALUE_INDEX |
+ AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK |
+ AUD_CONFIG_UPPER_N_MASK |
+ AUD_CONFIG_LOWER_N_MASK |
+ AUD_CONFIG_N_PROG_ENABLE,
+ AUD_CONFIG_N_VALUE_INDEX);
- intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 52bd3576835b..7d1e443f97b9 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1042,22 +1042,11 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
panel->vbt.backlight.controller = 0;
if (i915->display.vbt.version >= 191) {
- size_t exp_size;
+ const struct lfp_backlight_control_method *method;
- if (i915->display.vbt.version >= 236)
- exp_size = sizeof(struct bdb_lfp_backlight_data);
- else if (i915->display.vbt.version >= 234)
- exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
- else
- exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
-
- if (get_blocksize(backlight_data) >= exp_size) {
- const struct lfp_backlight_control_method *method;
-
- method = &backlight_data->backlight_control[panel_type];
- panel->vbt.backlight.type = method->type;
- panel->vbt.backlight.controller = method->controller;
- }
+ method = &backlight_data->backlight_control[panel_type];
+ panel->vbt.backlight.type = method->type;
+ panel->vbt.backlight.controller = method->controller;
}
panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ed89b86ea625..f672bfd70d45 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2534,7 +2534,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ struct intel_cdclk_config cdclk_config;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2543,12 +2544,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
- if (pipe == INVALID_PIPE ||
- old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (new_cdclk_state->disable_pipes) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ } else {
+ if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = new_cdclk_state->pipe;
+ } else {
+ cdclk_config = old_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ }
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+ cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+ old_cdclk_state->actual.voltage_level);
}
+
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &cdclk_config, pipe);
}
/**
@@ -2566,7 +2580,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2575,12 +2589,15 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_post_notify(state);
- if (pipe != INVALID_PIPE &&
- old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (!new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+ pipe = new_cdclk_state->pipe;
+ else
+ pipe = INVALID_PIPE;
+
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
- }
+ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -3058,6 +3075,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
+ cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@@ -3236,6 +3254,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
+ new_cdclk_state->disable_pipes = true;
+
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 48fd7d39e0cd..71bc032bfef1 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -51,6 +51,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
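For readers following the logic rather than the diff context: below is a minimal standalone sketch of the new pre-plane selection, in plain C with toy types. Only INVALID_PIPE, the field names and the comparison rules are taken from the hunks above; everything else is illustrative. With pipes kept running, cdclk may only be raised before the plane update and lowered after it, and the programmed voltage level must cover both configurations while the transition is in flight.

        #include <stdbool.h>
        #include <stdio.h>

        /* stand-ins for the i915 types; fields mirror intel_cdclk_config */
        struct cfg { int cdclk; int voltage_level; };
        #define INVALID_PIPE (-1)

        static struct cfg pre_plane_cfg(struct cfg old, struct cfg new_,
                                        int new_pipe, bool disable_pipes, int *pipe)
        {
                struct cfg c;

                if (disable_pipes) {
                        /* all pipes are off during the modeset: program the
                         * final config directly, nothing to synchronize with */
                        c = new_;
                        *pipe = INVALID_PIPE;
                } else {
                        if (new_.cdclk >= old.cdclk) {
                                /* raising cdclk happens before the planes */
                                c = new_;
                                *pipe = new_pipe;
                        } else {
                                /* lowering is deferred to the post-plane step */
                                c = old;
                                *pipe = INVALID_PIPE;
                        }
                        /* keep the voltage high enough for both configs while
                         * the transition is in flight */
                        c.voltage_level = new_.voltage_level > old.voltage_level ?
                                          new_.voltage_level : old.voltage_level;
                }
                return c;
        }

        int main(void)
        {
                int pipe;
                struct cfg c = pre_plane_cfg((struct cfg){ 652800, 1 },
                                             (struct cfg){ 326400, 0 },
                                             2, false, &pipe);

                /* lowering: old cdclk kept, max voltage, no pipe sync */
                printf("pre-plane: cdclk=%d volt=%d pipe=%d\n",
                       c.cdclk, c.voltage_level, pipe);
                return 0;
        }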
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index c587a8efeafc..c17462b4c2ac 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -4256,7 +4256,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
+ /*
+ * FIXME the modeset sequence is currently wrong and
+ * can't deal with bigjoiner + port sync at the same time.
+ */
return crtc_state1->hw.active && crtc_state2->hw.active &&
+ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index abd62bebc46d..e583515f9b25 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2725,7 +2725,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
- if (has_seamless_m_n(connector))
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that when updating M/N live.
+ */
+ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index b98a87883fef..9db43bd81ce2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -691,12 +691,15 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
u8 bcaps;
int ret;
+ *hdcp_capable = false;
+ *hdcp2_capable = false;
if (!intel_encoder_is_mst(connector->encoder))
return -EINVAL;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
- return ret;
+ drm_dbg_kms(&i915->drm,
+ "HDCP2 DPCD capability read failed err: %d\n", ret);
ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
if (ret)
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index b6e539f1342c..aabd018bd737 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1422,6 +1422,17 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /*
+ * FIXME figure out what is wrong with PSR+bigjoiner and
+ * fix it. Presumably something related to the fact that
+ * PSR is a transcoder level feature.
+ */
+ if (crtc_state->bigjoiner_pipes) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR disabled due to bigjoiner\n");
+ return;
+ }
+
if (CAN_PANEL_REPLAY(intel_dp))
crtc_state->has_panel_replay = true;
else
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index a9f44abfc9fc..b50cd0dcabda 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -897,11 +897,6 @@ struct lfp_brightness_level {
u16 reserved;
} __packed;
-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
- offsetof(struct bdb_lfp_backlight_data, brightness_level)
-#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
- offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
-
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index eb5bd0743902..f542ee1db1d9 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -117,6 +117,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that during VRR toggle/push/etc.
+ */
+ if (crtc_state->bigjoiner_pipes)
+ return;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
index 044219c5960a..99b71bb7da0a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -8,14 +8,14 @@
#include "intel_gt_ccs_mode.h"
#include "intel_gt_regs.h"
-void intel_gt_apply_ccs_mode(struct intel_gt *gt)
+unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
{
int cslice;
u32 mode = 0;
int first_ccs = __ffs(CCS_MASK(gt));
if (!IS_DG2(gt->i915))
- return;
+ return 0;
/* Build the value for the fixed CCS load balancing */
for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
@@ -35,5 +35,5 @@ void intel_gt_apply_ccs_mode(struct intel_gt *gt)
XEHP_CCS_MODE_CSLICE_MASK);
}
- intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode);
+ return mode;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
index 9e5549caeb26..55547f2ff426 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
@@ -8,6 +8,6 @@
struct intel_gt;
-void intel_gt_apply_ccs_mode(struct intel_gt *gt);
+unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt);
#endif /* __INTEL_GT_CCS_MODE_H__ */
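The rework above stops writing XEHP_CCS_MODE directly and instead returns the computed mask so the caller can install it as a workaround (see the wa_masked_en() hunk in intel_workarounds.c below), which survives engine resets. A hedged sketch of the mask construction follows; the 4-bit-per-cslice field layout is illustrative, the real widths come from XEHP_CCS_MODE_CSLICE_MASK.

        #include <stdio.h>

        #define MAX_CSLICES 4
        #define CSLICE_DISABLED 0xfu    /* stand-in for the cslice mask value */

        static unsigned int build_ccs_mode(unsigned int ccs_mask,
                                           unsigned int cslice_mask)
        {
                unsigned int mode = 0;
                int first_ccs = __builtin_ctz(ccs_mask);   /* __ffs() stand-in */
                int cslice;

                for (cslice = 0; cslice < MAX_CSLICES; cslice++) {
                        /* route every present cslice to the first CCS engine,
                         * mark the rest disabled */
                        unsigned int field = (cslice_mask & (1u << cslice)) ?
                                             (unsigned int)first_ccs :
                                             CSLICE_DISABLED;

                        mode |= field << (cslice * 4);
                }
                return mode;
        }

        int main(void)
        {
                /* only CCS0 present, cslices 0 and 1 fused in */
                printf("XEHP_CCS_MODE value: 0x%x\n", build_ccs_mode(0x1, 0x3));
                return 0;
        }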
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 6ec3582c9735..85c860ea9d7c 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -2859,6 +2859,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct intel_gt *gt = engine->gt;
+ u32 mode;
if (!IS_DG2(gt->i915))
return;
@@ -2875,7 +2876,8 @@ static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_li
* After having disabled automatic load balancing we need to
* assign all slices to a single CCS. We will call it CCS mode 1
*/
- intel_gt_apply_ccs_mode(gt);
+ mode = intel_gt_apply_ccs_mode(gt);
+ wa_masked_en(wal, XEHP_CCS_MODE, mode);
}
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f3dcae4b9d45..0f83c6d4376f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1403,14 +1403,17 @@ static void guc_cancel_busyness_worker(struct intel_guc *guc)
* Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
* every possible call stack is unfeasible. It would be too intrusive to many
* areas that really don't care about the GuC backend. However, there is the
- * 'reset_in_progress' flag available, so just use that.
+ * I915_RESET_BACKOFF flag, and the gt->reset.mutex can be tested with mutex_is_locked().
+ * So just use those. Note that testing both is required due to the hideously
+ * complex nature of the i915 driver's reset code paths.
*
* And note that in the case of a reset occurring during driver unload
- * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set
- * is fine because there is another cancel in _finish (when the reset flag is
- * not).
+ * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the
+ * reset flag/mutex are set) is fine because there is another explicit cancel in
+ * intel_guc_submission_fini (when the reset flag/mutex are not).
*/
- if (guc_to_gt(guc)->uc.reset_in_progress)
+ if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
+ test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
cancel_delayed_work(&guc->timestamp.work);
else
cancel_delayed_work_sync(&guc->timestamp.work);
@@ -1424,8 +1427,6 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- guc_cancel_busyness_worker(guc);
-
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
@@ -2004,13 +2005,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
- /*
- * Ensure the busyness worker gets cancelled even on a fatal wedge.
- * Note that reset_prepare is not allowed to because it confuses lockdep.
- */
- if (guc_submission_initialized(guc))
- guc_cancel_busyness_worker(guc);
-
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
!intel_guc_is_fw_running(guc) ||
@@ -2136,6 +2130,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
if (!guc->submission_initialized)
return;
+ guc_fini_engine_stats(guc);
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
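A compact model of the cancel decision the comment above describes, with plain booleans standing in for mutex_is_locked() and the I915_RESET_BACKOFF test: waiting synchronously while a reset holds locks the worker may need can deadlock, and the async path is backstopped by the explicit cancel in intel_guc_submission_fini().

        #include <stdbool.h>
        #include <stdio.h>

        static bool reset_mutex_held;   /* mutex_is_locked(&gt->reset.mutex) */
        static bool reset_backoff_set;  /* test_bit(I915_RESET_BACKOFF, ...) */

        static void cancel_busyness_worker(void)
        {
                /* Blocking on the worker during a reset can deadlock, so only
                 * wait when no reset is in flight; the async cancel is caught
                 * later by the explicit cancel in _fini. */
                if (reset_mutex_held || reset_backoff_set)
                        puts("cancel_delayed_work (async, reset in flight)");
                else
                        puts("cancel_delayed_work_sync (safe to wait)");
        }

        int main(void)
        {
                cancel_busyness_worker();       /* no reset: waits */
                reset_backoff_set = true;
                cancel_busyness_worker();       /* reset: async cancel */
                return 0;
        }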
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 6dfe5d9456c6..399bc319180b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -637,6 +637,10 @@ void intel_uc_reset_finish(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
+ /*
+ * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
+ * So this function is sometimes called with the in-progress flag not set.
+ */
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.h b/drivers/gpu/drm/imagination/pvr_fw_mips.h
index 408dbe63a90c..a0c5c41c8aa2 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_mips.h
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.h
@@ -7,13 +7,14 @@
#include "pvr_rogue_mips.h"
#include <asm/page.h>
+#include <linux/math.h>
#include <linux/types.h>
/* Forward declaration from pvr_gem.h. */
struct pvr_gem_object;
-#define PVR_MIPS_PT_PAGE_COUNT ((ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K) \
- >> PAGE_SHIFT)
+#define PVR_MIPS_PT_PAGE_COUNT DIV_ROUND_UP(ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K, PAGE_SIZE)
+
/**
* struct pvr_fw_mips_data - MIPS-specific data
*/
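Why DIV_ROUND_UP rather than the old right shift: the shift form truncates to zero whenever the host PAGE_SIZE exceeds the whole MIPS page-table region. A standalone demonstration with illustrative numbers (16 KiB of tables on a 64 KiB-page host):

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

        int main(void)
        {
                unsigned long region = 4 * 4096;   /* 16 KiB of MIPS page tables */
                unsigned long page   = 65536;      /* 64 KiB host pages */

                printf("shift/divide: %lu\n", region / page);          /* 0: broken */
                printf("round up:     %lu\n", DIV_ROUND_UP(region, page)); /* 1: correct */
                return 0;
        }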
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 5a9538bc0e26..5565f7777529 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -106,6 +106,8 @@
#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 */
#define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */
+#define PHY_CNTL1_INIT 0x03900000
+#define PHY_INVERT BIT(17)
#define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */
#define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */
#define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */
@@ -130,6 +132,8 @@ struct meson_dw_hdmi_data {
unsigned int addr);
void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data);
+ u32 cntl0_init;
+ u32 cntl1_init;
};
struct meson_dw_hdmi {
@@ -384,26 +388,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
dw_hdmi_bus_fmt_is_420(hdmi))
mode_is_420 = true;
- /* Enable clocks */
- regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
-
- /* Bring HDMITX MEM output of power down */
- regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
-
- /* Bring out of reset */
- dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
-
- /* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
- dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
- 0x3, 0x3);
-
- /* Enable cec_clk and hdcp22_tmdsclk_en */
- dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
- 0x3 << 4, 0x3 << 4);
-
- /* Enable normal output to PHY */
- dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
-
/* TMDS pattern setup */
if (mode->clock > 340000 && !mode_is_420) {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
@@ -425,20 +409,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* Setup PHY parameters */
meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420);
- /* Setup PHY */
- regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
- 0xffff << 16, 0x0390 << 16);
-
- /* BIT_INVERT */
- if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
- dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
- dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
- regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
- BIT(17), 0);
- else
- regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
- BIT(17), BIT(17));
-
/* Disable clock, fifo, fifo_wr */
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0);
@@ -492,7 +462,9 @@ static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi,
DRM_DEBUG_DRIVER("\n");
- regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
+ /* Fallback to init mode */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, dw_hdmi->data->cntl1_init);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, dw_hdmi->data->cntl0_init);
}
static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
@@ -610,11 +582,22 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
.fast_io = true,
};
-static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = {
+static const struct meson_dw_hdmi_data meson_dw_hdmi_gxbb_data = {
.top_read = dw_hdmi_top_read,
.top_write = dw_hdmi_top_write,
.dwc_read = dw_hdmi_dwc_read,
.dwc_write = dw_hdmi_dwc_write,
+ .cntl0_init = 0x0,
+ .cntl1_init = PHY_CNTL1_INIT | PHY_INVERT,
+};
+
+static const struct meson_dw_hdmi_data meson_dw_hdmi_gxl_data = {
+ .top_read = dw_hdmi_top_read,
+ .top_write = dw_hdmi_top_write,
+ .dwc_read = dw_hdmi_dwc_read,
+ .dwc_write = dw_hdmi_dwc_write,
+ .cntl0_init = 0x0,
+ .cntl1_init = PHY_CNTL1_INIT,
};
static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
@@ -622,6 +605,8 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
.top_write = dw_hdmi_g12a_top_write,
.dwc_read = dw_hdmi_g12a_dwc_read,
.dwc_write = dw_hdmi_g12a_dwc_write,
+ .cntl0_init = 0x000b4242, /* Bandgap */
+ .cntl1_init = PHY_CNTL1_INIT,
};
static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
@@ -656,6 +641,13 @@ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
meson_dw_hdmi->data->top_write(meson_dw_hdmi,
HDMITX_TOP_CLK_CNTL, 0xff);
+ /* Enable normal output to PHY */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
+
+ /* Setup PHY */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, meson_dw_hdmi->data->cntl1_init);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, meson_dw_hdmi->data->cntl0_init);
+
/* Enable HDMI-TX Interrupt */
meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
HDMITX_TOP_INTR_CORE);
@@ -865,11 +857,11 @@ static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
static const struct of_device_id meson_dw_hdmi_of_table[] = {
{ .compatible = "amlogic,meson-gxbb-dw-hdmi",
- .data = &meson_dw_hdmi_gx_data },
+ .data = &meson_dw_hdmi_gxbb_data },
{ .compatible = "amlogic,meson-gxl-dw-hdmi",
- .data = &meson_dw_hdmi_gx_data },
+ .data = &meson_dw_hdmi_gxl_data },
{ .compatible = "amlogic,meson-gxm-dw-hdmi",
- .data = &meson_dw_hdmi_gx_data },
+ .data = &meson_dw_hdmi_gxl_data },
{ .compatible = "amlogic,meson-g12a-dw-hdmi",
.data = &meson_dw_hdmi_g12a_data },
{ }
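Condensed from the hunks above, a table-driven model of the per-SoC PHY init values that replace the old shared gx_data: GXM reuses the GXL entry, only GXBB keeps the polarity-invert bit, and G12A additionally powers the bandgap via cntl0. The values are taken verbatim from the diff; the table itself is a simplification.

        #include <stdio.h>

        #define PHY_CNTL1_INIT 0x03900000u
        #define PHY_INVERT     (1u << 17)

        struct hdmi_cfg {
                const char *compatible;
                unsigned int cntl0_init, cntl1_init;
        };

        static const struct hdmi_cfg cfgs[] = {
                { "amlogic,meson-gxbb-dw-hdmi", 0x0,        PHY_CNTL1_INIT | PHY_INVERT },
                { "amlogic,meson-gxl-dw-hdmi",  0x0,        PHY_CNTL1_INIT },
                { "amlogic,meson-g12a-dw-hdmi", 0x000b4242, PHY_CNTL1_INIT },
        };

        int main(void)
        {
                size_t i;

                for (i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++)
                        printf("%-28s cntl0=0x%08x cntl1=0x%08x\n",
                               cfgs[i].compatible, cfgs[i].cntl0_init,
                               cfgs[i].cntl1_init);
                return 0;
        }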
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0674aca0f8a3..cf0b1de1c071 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1377,6 +1377,10 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a618(gpu))
gpu->ubwc_config.highest_bank_bit = 14;
+ if (adreno_is_a619(gpu))
+ /* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
+ gpu->ubwc_config.highest_bank_bit = 13;
+
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 1f5245fc2cdc..a847a0f7a73c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -852,7 +852,7 @@ static void a6xx_get_shader_block(struct msm_gpu *gpu,
(block->type << 8) | i);
in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
- block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+ block->size, out);
out += block->size * sizeof(u32);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 9a9f7092c526..a3e60ac70689 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -324,6 +324,7 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
},
};
+/* TODO: INTF 3, 7 and 8 are used for MST, marked as INTF_NONE for now */
static const struct dpu_intf_cfg x1e80100_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -358,8 +359,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.name = "intf_3", .id = INTF_3,
.base = 0x37000, .len = 0x280,
.features = INTF_SC7280_MASK,
- .type = INTF_DP,
- .controller_id = MSM_DP_CONTROLLER_1,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
@@ -368,7 +369,7 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.base = 0x38000, .len = 0x280,
.features = INTF_SC7280_MASK,
.type = INTF_DP,
- .controller_id = MSM_DP_CONTROLLER_2,
+ .controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
@@ -381,6 +382,33 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ }, {
+ .name = "intf_6", .id = INTF_6,
+ .base = 0x3A000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ }, {
+ .name = "intf_7", .id = INTF_7,
+ .base = 0x3b000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
+ }, {
+ .name = "intf_8", .id = INTF_8,
+ .base = 0x3c000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index ef871239adb2..68fae048a9a8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0600, entry,
+ debugfs_create_u32("threshold_low", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
- debugfs_create_u32("threshold_high", 0600, entry,
+ debugfs_create_u32("threshold_high", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
- debugfs_create_u32("min_core_ib", 0600, entry,
+ debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
- debugfs_create_u32("min_llcc_ib", 0600, entry,
+ debugfs_create_u32("min_llcc_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_llcc_ib);
- debugfs_create_u32("min_dram_ib", 0600, entry,
+ debugfs_create_u32("min_dram_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_dram_ib);
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 946dd0135dff..6a0a74832fb6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -525,14 +525,14 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
int ret;
if (!irq_cb) {
- DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
+ DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
if (!dpu_core_irq_is_valid(irq_idx)) {
- DPU_ERROR("invalid IRQ=[%d, %d]\n",
- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index c4cb82af5c2f..ffbfde922589 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -484,7 +484,7 @@ static void dp_display_handle_video_request(struct dp_display_private *dp)
}
}
-static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
+static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
{
int rc = 0;
@@ -541,7 +541,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
- rc = dp_display_handle_port_ststus_changed(dp);
+ rc = dp_display_handle_port_status_changed(dp);
else
rc = dp_display_handle_irq_hpd(dp);
}
@@ -588,6 +588,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
ret = dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
+ pm_runtime_put_sync(&pdev->dev);
} else {
dp->hpd_state = ST_MAINLINK_READY;
}
@@ -645,6 +646,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index e3f61c39df69..80166f702a0d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -89,7 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
for (i = 0; i < n; i++) {
ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
- drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
+ drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
return ret;
@@ -176,7 +176,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct msm_format *format;
int ret, i, n;
- drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
+ drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)\n",
mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
@@ -232,7 +232,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
refcount_set(&msm_fb->dirtyfb, 1);
- drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
+ drm_dbg_state(dev, "create: FB ID: %d (%p)\n", fb->base.id, fb);
return fb;
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 84c21ec2ceea..af6a6fcb1173 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -149,7 +149,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
return vblank_ctrl_queue_work(priv, crtc, true);
}
@@ -160,7 +160,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return;
- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
vblank_ctrl_queue_work(priv, crtc, false);
}
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 6f5d376d8fcc..a11d16a16c3b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -15,7 +15,9 @@ struct nvkm_gsp_mem {
};
struct nvkm_gsp_radix3 {
- struct nvkm_gsp_mem mem[3];
+ struct nvkm_gsp_mem lvl0;
+ struct nvkm_gsp_mem lvl1;
+ struct sg_table lvl2;
};
int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 479effcf607e..79cfab53f80e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
+#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
- fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
- bios->legacy.i2c_indices.crt, 1, 1);
+ bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
- all_heads, 0);
+ all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
- all_heads, 1);
+ all_heads, DCB_OUTPUT_B);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 7de7707ec6a8..a72c45809484 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -225,12 +225,18 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
u8 *dpcd = nv_encoder->dp.dpcd;
int ret = NOUVEAU_DP_NONE, hpd;
- /* If we've already read the DPCD on an eDP device, we don't need to
- * reread it as it won't change
+ /* eDP ports don't support hotplugging - so there's no point in probing an
+ * eDP port again once it has already been probed.
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
- dpcd[DP_DPCD_REV] != 0)
- return NOUVEAU_DP_SST;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (connector->status == connector_status_connected)
+ return NOUVEAU_DP_SST;
+ else if (connector->status == connector_status_disconnected)
+ return NOUVEAU_DP_NONE;
+ }
+
+ // Ensure that the aux bus is enabled for probing
+ drm_dp_dpcd_set_powered(&nv_connector->aux, true);
mutex_lock(&nv_encoder->dp.hpd_irq_lock);
if (mstm) {
@@ -293,6 +299,13 @@ out:
if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
nv50_mstm_remove(mstm);
+ /* GSP doesn't like it when we try to do aux transactions on a port it considers disconnected,
+ * and since we don't really have a use case for that anyway - just disable the aux bus here
+ * if we've decided the connector is disconnected
+ */
+ if (ret == NOUVEAU_DP_NONE)
+ drm_dp_dpcd_set_powered(&nv_connector->aux, false);
+
mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
return ret;
}
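A hedged userspace model of the new eDP detect flow described by the comments above: a previous verdict is final because eDP cannot hotplug, the aux bus must be powered before DPCD reads, and it is powered back down on a disconnected verdict since GSP rejects aux traffic on ports it considers disconnected. All names below are illustrative stand-ins, not the nouveau API.

        #include <stdbool.h>
        #include <stdio.h>

        enum status { UNKNOWN, CONNECTED, DISCONNECTED };

        static void aux_set_powered(bool on)    /* drm_dp_dpcd_set_powered() */
        {
                printf("aux power %s\n", on ? "on" : "off");
        }

        static enum status edp_detect(enum status cached)
        {
                bool present;

                /* eDP can't hotplug: a previous verdict is final */
                if (cached != UNKNOWN)
                        return cached;

                aux_set_powered(true);  /* aux must be up before DPCD reads */
                present = true;         /* pretend the DPCD read succeeded */

                /* power the bus back down when nothing was found, so no
                 * stray aux traffic hits a disconnected port */
                if (!present)
                        aux_set_powered(false);

                return present ? CONNECTED : DISCONNECTED;
        }

        int main(void)
        {
                enum status s = edp_detect(UNKNOWN);    /* probes, powers aux */

                s = edp_detect(s);      /* cached verdict, no aux traffic */
                return s == CONNECTED ? 0 : 1;
        }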
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4bf486b57101..cb05f7f48a98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
+static void of_fini(void *p)
+{
+ kfree(p);
+}
+
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
- .fini = (void(*)(void *))kfree,
+ .fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 7bcbc4895ec2..271bfa038f5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
+#include <subdev/gsp.h>
void
gm107_devinit_disable(struct nvkm_devinit *init)
@@ -33,10 +34,13 @@ gm107_devinit_disable(struct nvkm_devinit *init)
u32 r021c00 = nvkm_rd32(device, 0x021c00);
u32 r021c04 = nvkm_rd32(device, 0x021c04);
- if (r021c00 & 0x00000001)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
- if (r021c00 & 0x00000004)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ /* gsp only wants to enable/disable display */
+ if (!nvkm_gsp_rm(device->gsp)) {
+ if (r021c00 & 0x00000001)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
+ if (r021c00 & 0x00000004)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ }
if (r021c04 & 0x00000001)
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
index 11b4c9c274a1..666eb93b1742 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
@@ -41,6 +41,7 @@ r535_devinit_new(const struct nvkm_devinit_func *hw,
rm->dtor = r535_devinit_dtor;
rm->post = hw->post;
+ rm->disable = hw->disable;
ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 9994cbd6f1c4..abe41f7a3404 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -1112,7 +1112,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
- strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
+ strings = (char *)rpc + str_offset;
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
int name_len = strlen(r535_registry_entries[i].name) + 1;
@@ -1624,7 +1624,7 @@ r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
meta->magic = GSP_FW_WPR_META_MAGIC;
meta->revision = GSP_FW_WPR_META_REVISION;
- meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr;
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
@@ -1919,8 +1919,9 @@ nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
static void
nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
{
- for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
- nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
+ nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
+ nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+ nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
}
/**
@@ -1960,36 +1961,60 @@ static int
nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
struct nvkm_gsp_radix3 *rx3)
{
- u64 addr;
+ struct sg_dma_page_iter sg_dma_iter;
+ struct scatterlist *sg;
+ size_t bufsize;
+ u64 *pte;
+ int ret, i, page_idx = 0;
- for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
- u64 *ptes;
- size_t bufsize;
- int ret, idx;
+ ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0);
+ if (ret)
+ return ret;
- bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
- ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
- if (ret)
- return ret;
+ ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1);
+ if (ret)
+ goto lvl1_fail;
- ptes = rx3->mem[i].data;
- if (i == 2) {
- struct scatterlist *sgl;
+ // Allocate level 2
+ bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+ ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2);
+ if (ret)
+ goto lvl2_fail;
- for_each_sgtable_dma_sg(sgt, sgl, idx) {
- for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++)
- *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j);
- }
- } else {
- for (int j = 0; j < size / GSP_PAGE_SIZE; j++)
- *ptes++ = addr + GSP_PAGE_SIZE * j;
+ // Write the bus address of level 1 to level 0
+ pte = rx3->lvl0.data;
+ *pte = rx3->lvl1.addr;
+
+ // Write the bus address of each page in level 2 to level 1
+ pte = rx3->lvl1.data;
+ for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0)
+ *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+
+ // Finally, write the bus address of each page in sgt to level 2
+ for_each_sgtable_sg(&rx3->lvl2, sg, i) {
+ void *sgl_end;
+
+ pte = sg_virt(sg);
+ sgl_end = (void *)pte + sg->length;
+
+ for_each_sgtable_dma_page(sgt, &sg_dma_iter, page_idx) {
+ *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+ page_idx++;
+
+ // Go to the next scatterlist for level 2 if we've reached the end
+ if ((void *)pte >= sgl_end)
+ break;
}
+ }
- size = rx3->mem[i].size;
- addr = rx3->mem[i].addr;
+ if (ret) {
+lvl2_fail:
+ nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+lvl1_fail:
+ nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
}
- return 0;
+ return ret;
}
int
@@ -2021,7 +2046,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
sr = gsp->sr.meta.data;
sr->magic = GSP_FW_SR_META_MAGIC;
sr->revision = GSP_FW_SR_META_REVISION;
- sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr;
+ sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
sr->sizeOfSuspendResumeData = len;
mbox0 = lower_32_bits(gsp->sr.meta.addr);
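The radix3 rework names the levels explicitly: lvl0 is one page whose first entry holds the bus address of lvl1, lvl1 is one page of pointers to the lvl2 pages, and lvl2 (now a scatter-gather table instead of one large contiguous buffer) holds one 64-bit bus address per payload page. A standalone sketch of the resulting capacity math, using the driver's 4 KiB GSP page size; one lvl1 page can address 512 * 512 * 4 KiB = 1 GiB of payload, plenty for the GSP blobs.

        #include <stdio.h>

        #define GSP_PAGE_SIZE 4096ULL
        #define PTES_PER_PAGE (GSP_PAGE_SIZE / sizeof(unsigned long long)) /* 512 */

        int main(void)
        {
                unsigned long long payload = 64ULL << 20;  /* e.g. a 64 MiB image */
                unsigned long long payload_pages =
                        (payload + GSP_PAGE_SIZE - 1) / GSP_PAGE_SIZE;
                unsigned long long lvl2_pages =
                        (payload_pages + PTES_PER_PAGE - 1) / PTES_PER_PAGE;

                printf("payload pages: %llu, lvl2 pages: %llu, lvl1: 1, lvl0: 1\n",
                       payload_pages, lvl2_pages);
                return 0;
        }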
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index a7f3fc342d87..dd5b5a17ece0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
- if (refcount_inc_not_zero(&iobj->maps))
+ if (refcount_inc_not_zero(&iobj->maps)) {
+ /* read barrier match the wmb on refcount set */
+ smp_rmb();
return iobj->map;
+ }
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
+ /* barrier to ensure the ptrs are written before refcount is set */
+ smp_wmb();
refcount_set(&iobj->maps, 1);
}
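The barrier pairing above is the classic publish/consume pattern: the mapper stores iobj->map and the ptrs before setting the refcount (smp_wmb), and a reader that wins refcount_inc_not_zero() issues smp_rmb before dereferencing, so a non-zero count can never be observed together with stale pointers. A userspace sketch of the same discipline, with C11 release/acquire atomics standing in for the kernel barriers:

        #include <stdatomic.h>
        #include <stddef.h>

        struct obj {
                void *map;              /* payload published by the mapper */
                atomic_int maps;        /* stand-in for refcount_t */
        };

        static void publish(struct obj *o, void *map)
        {
                o->map = map;
                /* release ordering plays the role of smp_wmb(): the store to
                 * ->maps cannot be reordered before the store to ->map */
                atomic_store_explicit(&o->maps, 1, memory_order_release);
        }

        static void *acquire(struct obj *o)
        {
                /* acquire ordering plays the role of smp_rmb() after a
                 * successful refcount_inc_not_zero() */
                if (atomic_load_explicit(&o->maps, memory_order_acquire) > 0)
                        return o->map;
                return NULL;
        }

        int main(void)
        {
                static struct obj o;
                int x = 42;

                publish(&o, &x);
                return acquire(&o) ? 0 : 1;
        }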
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d037b3b8b999..5b15d0294836 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -177,7 +177,7 @@ config DRM_PANEL_ILITEK_IL9322
config DRM_PANEL_ILITEK_ILI9341
tristate "Ilitek ILI9341 240x320 QVGA panels"
- depends on OF && SPI
+ depends on SPI
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 3574681891e8..b933380b7eb7 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -22,8 +22,9 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -421,7 +422,7 @@ static int ili9341_dpi_prepare(struct drm_panel *panel)
ili9341_dpi_init(ili);
- return ret;
+ return 0;
}
static int ili9341_dpi_enable(struct drm_panel *panel)
@@ -691,7 +692,7 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
* Every new incarnation of this display must have a unique
* data entry for the system in this driver.
*/
- ili->conf = of_device_get_match_data(dev);
+ ili->conf = device_get_match_data(dev);
if (!ili->conf) {
dev_err(dev, "missing device configuration\n");
return -ENODEV;
@@ -714,18 +715,18 @@ static int ili9341_probe(struct spi_device *spi)
reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(reset))
- dev_err(dev, "Failed to get gpio 'reset'\n");
+ return dev_err_probe(dev, PTR_ERR(reset), "Failed to get gpio 'reset'\n");
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc))
- dev_err(dev, "Failed to get gpio 'dc'\n");
+ return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n");
if (!strcmp(id->name, "sf-tc240t-9370-t"))
return ili9341_dpi_probe(spi, dc, reset);
else if (!strcmp(id->name, "yx240qv29"))
return ili9341_dbi_probe(spi, dc, reset);
- return -1;
+ return -ENODEV;
}
static void ili9341_remove(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index cb7406d74466..c39fe0fc5d69 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -614,8 +614,6 @@ static void nt36672e_panel_remove(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 775144695283..b15ca56a09a7 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index f38385fe76bb..b91019cd5acb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -502,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
mapping_set_unevictable(mapping);
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+ /* Can happen if the last fault only partially filled this
+ * section of the pages array before failing. In that case
+ * we skip already filled pages.
+ */
+ if (pages[i])
+ continue;
+
pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) {
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
- goto err_pages;
+ goto err_unlock;
}
}
@@ -514,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
- goto err_pages;
+ goto err_unlock;
ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
@@ -537,8 +544,6 @@ out:
err_map:
sg_free_table(sgt);
-err_pages:
- drm_gem_shmem_put_pages(&bo->base);
err_unlock:
dma_resv_unlock(obj->resv);
err_bo:
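A toy model of the retry behaviour the new comment describes: a fault handler that failed halfway leaves part of pages[] populated, so a rerun must skip the non-NULL slots rather than overwrite (and leak) them. Names and sizes here are illustrative.

        #include <stdio.h>

        #define N 8

        static int fill_pages(const char *pages[N], int fail_at)
        {
                int i;

                for (i = 0; i < N; i++) {
                        if (pages[i])
                                continue;   /* filled by an earlier attempt */
                        if (i == fail_at)
                                return -1;  /* shmem_read_mapping_page() failed */
                        pages[i] = "page";
                }
                return 0;
        }

        int main(void)
        {
                const char *pages[N] = { 0 };
                int i;

                if (fill_pages(pages, 5))   /* first fault fails halfway */
                        fill_pages(pages, -1);  /* retry completes the rest */
                for (i = 0; i < N; i++)
                        printf("%d:%s ", i, pages[i] ? "ok" : "-");
                printf("\n");
                return 0;
        }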
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 94947229888b..b7f22597ee95 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
- //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels)
UCHAR ucNumDPMLevels;
//a index to the array of nonClockInfos
@@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2
/**
* Driver will read the first ucNumDPMLevels in this array
*/
- UCHAR clockInfoIndex[1];
+ UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels);
} ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
- ATOM_PPLIB_STATE_V2 states[1];
+ ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
}StateArray;
@@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
- UCHAR clockInfo[1];
+ UCHAR clockInfo[] __counted_by(ucNumEntries);
}ClockInfoArray;
typedef struct _NonClockInfoArray{
@@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries);
}NonClockInfoArray;
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
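The pptable changes replace the old one-element-array idiom with C99 flexible array members annotated __counted_by(), so the compiler and FORTIFY can bounds-check accesses against the runtime count. A standalone sketch of the struct_size() arithmetic the updated comment refers to, with the kernel macro spelled out for illustration:

        #include <stdio.h>
        #include <stdlib.h>

        /* struct_size(ptr, member, n) in the kernel; spelled out here */
        #define STRUCT_SIZE(type, member, n) \
                (sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

        struct state_v2 {
                unsigned char num_levels;
                unsigned char non_clock_index;
                unsigned char clock_info_index[];  /* was clock_info_index[1] */
        };

        int main(void)
        {
                unsigned char n = 4;
                struct state_v2 *s =
                        malloc(STRUCT_SIZE(struct state_v2, clock_info_index, n));

                if (!s)
                        return 1;
                s->num_levels = n;
                printf("allocation: %zu bytes\n",
                       STRUCT_SIZE(struct state_v2, clock_info_index, n));
                free(s);
                return 0;
        }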
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bb1f0a3371ab..10793a433bf5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
- ATOM_CONNECTOR_INFO_I2C ci =
- supported_devices->info.asConnInfo[i];
+ ATOM_CONNECTOR_INFO_I2C ci;
+
+ if (frev > 1)
+ ci = supported_devices->info_2d1.asConnInfo[i];
+ else
+ ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 112438d965ff..6e1fd6985ffc 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+ if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_uncached[order];
@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
- if (use_dma_alloc || nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_init(&pool->caching[i].orders[j],
- pool, i, j);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ /* Initialize only pool types which are actually used */
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_init(pt, pool, i, j);
+ }
}
}
EXPORT_SYMBOL(ttm_pool_init);
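The init (and, below, fini) rework treats ttm_pool_select_type() as the single source of truth: a per-pool type is initialised only if the selector would actually hand it out; anything else is served by the shared global pools and must not be touched. A small sketch of that pattern with hypothetical names:

        #include <stdbool.h>
        #include <stdio.h>

        #define TYPES 3

        static int global_pool[TYPES];
        static int local_pool[TYPES];

        /* stand-in for ttm_pool_select_type(): prefer per-pool storage only
         * when this pool has a special constraint (here: use_local) */
        static int *select_type(bool use_local, int i)
        {
                return use_local ? &local_pool[i] : &global_pool[i];
        }

        static void pool_init(bool use_local)
        {
                int i;

                for (i = 0; i < TYPES; i++) {
                        int *pt = select_type(use_local, i);

                        if (pt != &local_pool[i])
                                continue;   /* served by a shared global pool */
                        *pt = 1;            /* initialise only what we'll use */
                }
        }

        int main(void)
        {
                pool_init(true);
                printf("local[0]=%d global[0]=%d\n", local_pool[0], global_pool[0]);
                return 0;
        }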
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < NR_PAGE_ORDERS; ++j)
- ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_fini(pt);
+ }
}
/* We removed the pool types from the LRU, but we need to also make sure
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 578a7c37f00b..d776e3f87064 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -92,7 +92,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
*/
if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
page_flags |= TTM_TT_FLAG_DECRYPTED;
- drm_info(ddev, "TT memory decryption enabled.");
+ drm_info_once(ddev, "TT memory decryption enabled.");
}
bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 2e04f6cb661e..ce6b2fb341d1 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -105,7 +105,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_BIN];
- file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
file->jobs_sent[V3D_BIN]++;
v3d->queue[V3D_BIN].jobs_sent++;
@@ -126,7 +125,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
- file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
file->jobs_sent[V3D_RENDER]++;
v3d->queue[V3D_RENDER].jobs_sent++;
@@ -147,7 +145,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_CSD];
- file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
file->jobs_sent[V3D_CSD]++;
v3d->queue[V3D_CSD].jobs_sent++;
@@ -195,7 +192,6 @@ v3d_hub_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_TFU];
- file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
file->jobs_sent[V3D_TFU]++;
v3d->queue[V3D_TFU].jobs_sent++;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index c52c7bf1485b..717d624e9a05 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
.no_wait_gpu = false
};
u32 j, initial_line = dst_offset / dst_stride;
- struct vmw_bo_blit_line_data d;
+ struct vmw_bo_blit_line_data d = {0};
int ret = 0;
+ struct page **dst_pages = NULL;
+ struct page **src_pages = NULL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}
+ if (!src->ttm->pages && src->ttm->sg) {
+ src_pages = kvmalloc_array(src->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!src_pages)
+ return -ENOMEM;
+ ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+ src->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+ if (!dst->ttm->pages && dst->ttm->sg) {
+ dst_pages = kvmalloc_array(dst->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!dst_pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+ dst->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+
d.mapped_dst = 0;
d.mapped_src = 0;
d.dst_addr = NULL;
d.src_addr = NULL;
- d.dst_pages = dst->ttm->pages;
- d.src_pages = src->ttm->pages;
+ d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+ d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
+ if (src_pages)
+ kvfree(src_pages);
+ if (dst_pages)
+ kvfree(dst_pages);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index bfd41ce3c8f4..00144632c600 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -204,6 +204,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
+ buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
@@ -377,7 +378,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
- .no_wait_gpu = false
+ .no_wait_gpu = false,
+ .resv = params->resv,
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +396,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
- &vmw_bo->placement, 0, &ctx, NULL,
- NULL, destroy);
+ &vmw_bo->placement, 0, &ctx,
+ params->sg, params->resv, destroy);
if (unlikely(ret))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 0d496dc9c6af..f349642e6190 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -55,6 +55,8 @@ struct vmw_bo_params {
enum ttm_bo_type bo_type;
size_t size;
bool pin;
+ struct dma_resv *resv;
+ struct sg_table *sg;
};
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c7d90f96d16a..58fb40c93100 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -666,11 +666,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
- /* TTM currently doesn't fully support SEV encryption. */
- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
- return -EINVAL;
-
- if (vmw_force_coherent)
+ /*
+ * When running with SEV we always want dma mappings, because
+ * otherwise ttm tt pool pages will bounce through swiotlb, running
+ * out of available space.
+ */
+ if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
@@ -1627,6 +1628,7 @@ static const struct drm_driver driver = {
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
+ .gem_prime_import_sg_table = vmw_prime_import_sg_table,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 12efecc17df6..b019a1a1787a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1130,6 +1130,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table);
/*
* MemoryOBject management - vmwgfx_mob.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 2a0cda324703..5efc6a766f64 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -991,7 +991,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
}
event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
- event->event.base.length = sizeof(*event);
+ event->event.base.length = sizeof(event->event);
event->event.user_data = user_data;
ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 12787bb9c111..d6bcaf078b1f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -149,6 +149,38 @@ out_no_bo:
return ret;
}
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ int ret;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_gem_object *gem = NULL;
+ struct vmw_bo *vbo;
+ struct vmw_bo_params params = {
+ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_sg,
+ .size = attach->dmabuf->size,
+ .pin = false,
+ .resv = attach->dmabuf->resv,
+ .sg = table,
+
+ };
+
+ dma_resv_lock(params.resv, NULL);
+
+ ret = vmw_bo_create(dev_priv, &params, &vbo);
+ if (ret != 0)
+ goto out_no_bo;
+
+ vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+ gem = &vbo->tbo.base;
+out_no_bo:
+ dma_resv_unlock(params.resv);
+ return gem;
+}
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index cd4925346ed4..84ae4e10a2eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -933,6 +933,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -940,9 +941,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
- /* We always want to have an active plane with an active CRTC */
- if (has_primary != new_state->enable)
- return -EINVAL;
+ /*
+ * This is fine in general, but broken userspace might expect
+ * some actual rendering, so give a clue as to why it's blank.
+ */
+ if (new_state->enable && !has_primary)
+ drm_dbg_driver(&vmw->drm,
+ "CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index a94947b588e8..19a843da87b7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
};
static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 2d72a5ee7c0c..c99cad444991 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
- return ttm_prime_fd_to_handle(tfile, fd, handle);
+ if (ret)
+ ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+ return ret;
}
int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ int ret;
+
+ if (handle > VMWGFX_NUM_MOB)
+ ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ else
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+ return ret;
}
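The prime changes layer the new GEM paths over the legacy TTM ones: imports try the TTM object-file lookup first and fall back to drm_gem_prime_fd_to_handle(), while exports route handles at or below VMWGFX_NUM_MOB (GEM-backed buffers) through drm_gem_prime_handle_to_fd(). A toy sketch of the import fallback; the backends below are stubs, not the real API.

        #include <stdio.h>

        /* toy backends: the TTM lookup fails for buffers that were never
         * exported through TTM, the GEM path then takes over */
        static int ttm_fd_to_handle(int fd, unsigned int *handle)
        {
                (void)fd; (void)handle;
                return -22;     /* -EINVAL: not a TTM prime object */
        }

        static int gem_fd_to_handle(int fd, unsigned int *handle)
        {
                (void)fd;
                *handle = 3;
                return 0;
        }

        static int fd_to_handle(int fd, unsigned int *handle)
        {
                int ret = ttm_fd_to_handle(fd, handle); /* legacy path first */

                if (ret)
                        ret = gem_fd_to_handle(fd, handle); /* then GEM prime */
                return ret;
        }

        int main(void)
        {
                unsigned int handle = 0;

                printf("import: ret=%d handle=%u\n",
                       fd_to_handle(42, &handle), handle);
                return 0;
        }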
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 4d23d0a70bcb..621d98b376bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -188,13 +188,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- vsgt->sgt = &vmw_tt->sgt;
- ret = sg_alloc_table_from_pages_segment(
- &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
- (unsigned long)vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
- if (ret)
- goto out_sg_alloc_fail;
+ if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+ vsgt->sgt = vmw_tt->dma_ttm.sg;
+ } else {
+ vsgt->sgt = &vmw_tt->sgt;
+ ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+ vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev),
+ GFP_KERNEL);
+ if (ret)
+ goto out_sg_alloc_fail;
+ }
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
@@ -209,8 +214,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;
out_map_fail:
- sg_free_table(vmw_tt->vsgt.sgt);
- vmw_tt->vsgt.sgt = NULL;
+ drm_warn(&dev_priv->drm, "VSG table map failed!");
+ sg_free_table(vsgt->sgt);
+ vsgt->sgt = NULL;
out_sg_alloc_fail:
return ret;
}
@@ -356,15 +362,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- int ret;
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
- /* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
- ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (external && ttm->sg)
+ return drm_prime_sg_to_dma_addr_array(ttm->sg,
+ ttm->dma_address,
+ ttm->num_pages);
- return ret;
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -372,6 +380,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (external)
+ return;
vmw_ttm_unbind(bdev, ttm);
@@ -390,6 +402,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct vmw_ttm_tt *vmw_be;
int ret;
+ bool external = bo->type == ttm_bo_type_sg;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
@@ -398,7 +411,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+ if (external)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else
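
The thread through these vmwgfx_ttm_buffer.c hunks: a dma-buf import (ttm_bo_type_sg) must not allocate or free its own pages. The TT is flagged TTM_TT_FLAG_EXTERNAL, reuses the exporter's sg table, and populate only derives per-page DMA addresses from it. A reduced sketch of that populate-time branch, under the same flag and field assumptions:

/* Sketch: imported (external) TTs reuse the exporter's sg table. */
static int my_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
			  struct ttm_operation_ctx *ctx)
{
	if (ttm_tt_is_populated(ttm))
		return 0;

	if ((ttm->page_flags & TTM_TT_FLAG_EXTERNAL) && ttm->sg)
		return drm_prime_sg_to_dma_addr_array(ttm->sg,
						      ttm->dma_address,
						      ttm->num_pages);

	/* Native BOs still get their pages from the TTM pool. */
	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}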
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 420eba0e4be0..854a7bb53567 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -84,7 +84,8 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
-#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P)
+#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \
+ IS_PLATFORM(dev_priv, XE_ALDERLAKE_N))
#define IS_XEHPSDV(dev_priv) (dev_priv && 0)
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC)
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index b21da7b745a5..a9c1f9885c6b 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -31,7 +31,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
if (ret)
- return ret;
+ goto err;
if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
/*
@@ -42,12 +42,16 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
ttm_bo_unreserve(&bo->ttm);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
bo->flags |= XE_BO_SCANOUT_BIT;
}
ttm_bo_unreserve(&bo->ttm);
+ return 0;
+err:
+ xe_bo_put(bo);
return ret;
}
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index e4db069f0db3..6ec375c1c4b6 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -108,11 +108,6 @@ int xe_display_create(struct xe_device *xe)
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
drmm_mutex_init(&xe->drm, &xe->sb_lock);
- drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
- drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
- drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
- drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
- drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
xe->enabled_irq_mask = ~0;
err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 0b1266c88a6a..deddc8be48c0 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -125,7 +125,7 @@
#define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234)
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244)
+#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index a0afe1ba6dd5..f9705430ada9 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -378,7 +378,9 @@ static int gt_fw_domain_init(struct xe_gt *gt)
err);
/* Initialize CCS mode sysfs after early initialization of HW engines */
- xe_gt_ccs_mode_sysfs_init(gt);
+ err = xe_gt_ccs_mode_sysfs_init(gt);
+ if (err)
+ goto err_force_wake;
/*
* Stash hardware-reported version. Since this register does not exist
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index 529fc286cd06..396aeb5b9924 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -167,25 +167,20 @@ static void xe_gt_ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
* and it is expected that there are no open drm clients while doing so.
* The number of available compute slices is exposed to user through a per-gt
* 'num_cslices' sysfs interface.
+ *
+ * Returns: 0 on success, or a negative error code on failure.
*/
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
if (!xe_gt_ccs_mode_enabled(gt))
- return;
+ return 0;
err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
- if (err) {
- drm_warn(&xe->drm, "Sysfs creation for ccs_mode failed err: %d\n", err);
- return;
- }
+ if (err)
+ return err;
- err = drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
- if (err) {
- sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
- drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
- __func__, err);
- }
+ return drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
}
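
The rewrite leans on drmm_add_action_or_reset(), which either registers a release action on the drm device or, on failure, runs the action immediately, so the caller no longer needs a manual unwind branch. A minimal sketch of the managed-release pattern, assuming the same fini callback shape:

static void my_sysfs_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;

	sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
}

static int my_sysfs_init(struct drm_device *drm, struct xe_gt *gt)
{
	int err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);

	if (err)
		return err;

	/* On failure this runs my_sysfs_fini() itself before returning. */
	return drmm_add_action_or_reset(drm, my_sysfs_fini, gt);
}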
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
index f39975aaaab0..f8779852cf0d 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
@@ -12,7 +12,7 @@
#include "xe_platform_types.h"
void xe_gt_apply_ccs_mode(struct xe_gt *gt);
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
static inline bool xe_gt_ccs_mode_enabled(const struct xe_gt *gt)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 6ad4c1a90a78..5339f8a49004 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -100,7 +100,7 @@ struct __guc_ads_blob {
struct guc_engine_usage engine_usage;
struct guc_um_init_params um_init_params;
/* From here on, location is dynamic! Refer to above diagram. */
- struct guc_mmio_reg regset[0];
+ struct guc_mmio_reg regset[];
} __packed;
#define ads_blob_read(ads_, field_) \
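
regset[0] is the old GNU zero-length-array idiom; the C99 flexible array member regset[] lets the compiler and helpers such as struct_size() reason about the trailing array. A minimal allocation sketch with hypothetical names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct reg_blob {
	u32 count;
	struct guc_mmio_reg regset[];	/* flexible array member */
};

static struct reg_blob *reg_blob_alloc(u32 n)
{
	/* struct_size() computes header + n entries, overflow-checked. */
	struct reg_blob *blob = kzalloc(struct_size(blob, regset, n),
					GFP_KERNEL);

	if (blob)
		blob->count = n;
	return blob;
}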
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 355edd4d758a..8bbfa45798e2 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -120,6 +120,7 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_ct *ct = arg;
+ destroy_workqueue(ct->g2h_wq);
xa_destroy(&ct->fence_lookup);
}
@@ -145,13 +146,20 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
xe_assert(xe, !(guc_ct_size() % PAGE_SIZE));
- drmm_mutex_init(&xe->drm, &ct->lock);
+ ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
+ if (!ct->g2h_wq)
+ return -ENOMEM;
+
spin_lock_init(&ct->fast_lock);
xa_init(&ct->fence_lookup);
INIT_WORK(&ct->g2h_worker, g2h_worker_func);
init_waitqueue_head(&ct->wq);
init_waitqueue_head(&ct->g2h_fence_wq);
+ err = drmm_mutex_init(&xe->drm, &ct->lock);
+ if (err)
+ return err;
+
primelockdep(ct);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
@@ -1054,10 +1062,10 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
adj_len);
break;
case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
- ret = xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
break;
case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
- ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
+ ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
break;
default:
drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 5083e099064f..105bb8e99a8d 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -34,7 +34,7 @@ static inline void xe_guc_ct_irq_handler(struct xe_guc_ct *ct)
return;
wake_up_all(&ct->wq);
- queue_work(system_unbound_wq, &ct->g2h_worker);
+ queue_work(ct->g2h_wq, &ct->g2h_worker);
xe_guc_ct_fast_path(ct);
}
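
Moving G2H processing from system_unbound_wq onto a driver-owned ordered workqueue gives the driver an explicit teardown point: destroy_workqueue() drains the queue before the CT object it serves disappears. A minimal sketch of that lifecycle, with hypothetical names:

struct my_ct {
	struct workqueue_struct *g2h_wq;
	struct work_struct g2h_worker;
};

static void my_g2h_work(struct work_struct *w)
{
	/* process pending G2H messages */
}

static int my_ct_init(struct my_ct *ct)
{
	/* Ordered: items run one at a time, in submission order. */
	ct->g2h_wq = alloc_ordered_workqueue("my-g2h-wq", 0);
	if (!ct->g2h_wq)
		return -ENOMEM;
	INIT_WORK(&ct->g2h_worker, my_g2h_work);
	return 0;
}

static void my_ct_fini(struct my_ct *ct)
{
	destroy_workqueue(ct->g2h_wq);	/* flushes pending work first */
}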
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index d29144c9f20b..fede4c6e93cb 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -120,6 +120,8 @@ struct xe_guc_ct {
wait_queue_head_t wq;
/** @g2h_fence_wq: wait queue used for G2H fencing */
wait_queue_head_t g2h_fence_wq;
+ /** @g2h_wq: used to process G2H */
+ struct workqueue_struct *g2h_wq;
/** @msg: Message buffer */
u32 msg[GUC_CTB_MSG_MAX_LEN];
/** @fast_msg: Message buffer */
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index b545f850087c..6b9b1cbedd37 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -53,7 +53,6 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
struct xe_gt *gt = huc_to_gt(huc);
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
- int err;
/* we use a single object for both input and output */
bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
@@ -66,13 +65,7 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
huc->gsc_pkt = bo;
- err = drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
- if (err) {
- free_gsc_pkt(&xe->drm, huc);
- return err;
- }
-
- return 0;
+ return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
}
int xe_huc_init(struct xe_huc *huc)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index b82233a41606..9ac7fbe201b3 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -290,7 +290,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
* As y can be < 2, we compute tau4 = (4 | x) << y
* and then add 2 when doing the final right shift to account for units
*/
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@@ -330,7 +330,7 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
- tau4 = ((1 << x_w) | x) << y;
+ tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)
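
The (u64) casts matter because ((1 << x_w) | x) << y is evaluated in 32-bit int arithmetic before being assigned to the 64-bit tau4, so a large y shifts the high bits away. A minimal standalone C sketch of the same promotion bug, with hypothetical field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x = 3, x_w = 2, y = 30;	/* hypothetical register fields */
	uint64_t bad, good;

	/* Shift happens in 32-bit arithmetic; high bits are lost. */
	bad = ((1u << x_w) | x) << y;

	/* Widen first so the shift happens in 64 bits, as in the fix. */
	good = (uint64_t)((1u << x_w) | x) << y;

	printf("bad=%llu good=%llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}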
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 1426febe86eb..57066faf575e 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -525,9 +525,8 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
{
- regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH) |
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+ regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
/* TODO: Timestamp */
}
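
Both this hunk and the RING_CONTEXT_CONTROL change above hinge on "masked" registers: the upper 16 bits of the written value are a per-bit write-enable mask for the lower 16, so unrelated bits survive the write. The helpers below sketch the usual i915/xe convention; the exact macro bodies are an assumption, not quoted from this patch:

/* Sketch of the masked-register convention (macro bodies assumed). */
#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))	/* write 1 to bit */
#define MASKED_BIT_DISABLE(b)	((b) << 16)		/* write 0 to bit */

/*
 * One write enables bits 0 and 3 and leaves every other bit of the
 * register untouched, because their mask bits stay 0.
 */
u32 val = MASKED_BIT_ENABLE((1u << 3) | (1u << 0));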
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index ee1bb938c493..2ba4fb9511f6 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -227,7 +227,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (vm->flags & XE_VM_FLAG_64K && level == 1)
flags = XE_PDE_64K;
- entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
+ entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
XE_PAGE_SIZE, pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
@@ -235,7 +235,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Write PDE's that point to our BO. */
for (i = 0; i < num_entries - num_level; i++) {
- entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
+ entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
pat_index);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
@@ -291,7 +291,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
drm_suballoc_manager_init(&m->vm_update_sa,
- (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
+ (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
NUM_VMUSA_UNIT_PER_PAGE, 0);
m->pt_bo = bo;
@@ -490,7 +490,7 @@ static void emit_pte(struct xe_migrate *m,
struct xe_vm *vm = m->q->vm;
u16 pat_index;
u32 ptes;
- u64 ofs = at_pt * XE_PAGE_SIZE;
+ u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
u64 cur_ofs;
/* Indirect access needs compression enabled uncached PAT index */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 62d1ef8867a8..32cd0c978aa2 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1577,6 +1577,16 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe->usm.num_vm_in_fault_mode--;
else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe->usm.num_vm_in_non_fault_mode--;
+
+ if (vm->usm.asid) {
+ void *lookup;
+
+ xe_assert(xe, xe->info.has_asid);
+ xe_assert(xe, !(vm->flags & XE_VM_FLAG_MIGRATION));
+
+ lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
+ xe_assert(xe, lookup == vm);
+ }
mutex_unlock(&xe->usm.lock);
for_each_tile(tile, xe, id)
@@ -1592,24 +1602,18 @@ static void vm_destroy_work_func(struct work_struct *w)
struct xe_device *xe = vm->xe;
struct xe_tile *tile;
u8 id;
- void *lookup;
/* xe_vm_close_and_put was not called? */
xe_assert(xe, !vm->size);
+ if (xe_vm_in_preempt_fence_mode(vm))
+ flush_work(&vm->preempt.rebind_work);
+
mutex_destroy(&vm->snap_mutex);
- if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
+ if (!(vm->flags & XE_VM_FLAG_MIGRATION))
xe_device_mem_access_put(xe);
- if (xe->info.has_asid && vm->usm.asid) {
- mutex_lock(&xe->usm.lock);
- lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
- xe_assert(xe, lookup == vm);
- mutex_unlock(&xe->usm.lock);
- }
- }
-
for_each_tile(tile, xe, id)
XE_WARN_ON(vm->pt_root[id]);
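
xa_erase() removes an entry and returns whatever was stored there, so moving the erase into xe_vm_close_and_put() while usm.lock is held both closes the race with the deferred destroy worker and lets the asserts verify the mapping really pointed at this VM. A sketch of the idiom, with hypothetical names:

#include <linux/xarray.h>

static void id_table_remove(struct xarray *xa, u32 id, void *owner)
{
	/* Returns the old entry; NULL means the id was never installed. */
	void *old = xa_erase(xa, id);

	WARN_ON(old != owner);
}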
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 783975d1384f..7c52757a89db 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -351,11 +351,6 @@ static int host1x_device_uevent(const struct device *dev,
return 0;
}
-static int host1x_dma_configure(struct device *dev)
-{
- return of_dma_configure(dev, dev->of_node, true);
-}
-
static const struct dev_pm_ops host1x_device_pm_ops = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
@@ -369,7 +364,6 @@ const struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.uevent = host1x_device_uevent,
- .dma_configure = host1x_dma_configure,
.pm = &host1x_device_pm_ops,
};
@@ -458,8 +452,6 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
- of_dma_configure(&device->dev, host1x->dev->of_node, true);
-
device->dev.dma_parms = &device->dma_parms;
dma_set_max_seg_size(&device->dev, UINT_MAX);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 89983d7d73ca..3a0aaa68ac8d 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -216,6 +216,30 @@ static const struct host1x_info host1x07_info = {
*/
static const struct host1x_sid_entry tegra234_sid_table[] = {
{
+ /* SE2 MMIO */
+ .base = 0x1658,
+ .offset = 0x90,
+ .limit = 0x90
+ },
+ {
+ /* SE4 MMIO */
+ .base = 0x1660,
+ .offset = 0x90,
+ .limit = 0x90
+ },
+ {
+ /* SE2 channel */
+ .base = 0x1738,
+ .offset = 0x90,
+ .limit = 0x90
+ },
+ {
+ /* SE4 channel */
+ .base = 0x1740,
+ .offset = 0x90,
+ .limit = 0x90
+ },
+ {
/* VIC channel */
.base = 0x17b8,
.offset = 0x30,
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index e6a8b6d8eab7..3c3c497b6b91 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
}
break;
case REPORT_TYPE_MOUSE:
- workitem->reports_supported |= STD_MOUSE | HIDPP;
- if (djrcv_dev->type == recvr_type_mouse_only)
- workitem->reports_supported |= MULTIMEDIA;
+ workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
break;
}
}
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index f9cceaeffd08..da5ea5a23b08 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -944,9 +944,11 @@ static void mcp2221_hid_unregister(void *ptr)
/* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
static void mcp2221_remove(struct hid_device *hdev)
{
+#if IS_REACHABLE(CONFIG_IIO)
struct mcp2221 *mcp = hid_get_drvdata(hdev);
cancel_delayed_work_sync(&mcp->init_work);
+#endif
}
#if IS_REACHABLE(CONFIG_IIO)
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index ab5953fc2436..80e0f23c1c33 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -481,10 +481,10 @@ static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
{ BTN_TR, JC_BTN_R, },
{ BTN_TR2, JC_BTN_LSTICK, }, /* ZR */
{ BTN_START, JC_BTN_PLUS, },
- { BTN_FORWARD, JC_BTN_Y, }, /* C UP */
- { BTN_BACK, JC_BTN_ZR, }, /* C DOWN */
- { BTN_LEFT, JC_BTN_X, }, /* C LEFT */
- { BTN_RIGHT, JC_BTN_MINUS, }, /* C RIGHT */
+ { BTN_SELECT, JC_BTN_Y, }, /* C UP */
+ { BTN_X, JC_BTN_ZR, }, /* C DOWN */
+ { BTN_Y, JC_BTN_X, }, /* C LEFT */
+ { BTN_C, JC_BTN_MINUS, }, /* C RIGHT */
{ BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, },
{ /* sentinel */ },
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 2df1ab3c31cc..d965382196c6 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -64,7 +64,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
-#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@@ -190,15 +189,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
msgs[n].len = recv_len;
msgs[n].buf = recv_buf;
n++;
-
- set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
ret = i2c_transfer(client->adapter, msgs, n);
- if (recv_len)
- clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
-
if (ret != n)
return ret < 0 ? ret : -EIO;
@@ -556,9 +550,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
- if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
- return IRQ_HANDLED;
-
i2c_hid_get_input(ihid);
return IRQ_HANDLED;
@@ -735,12 +726,15 @@ static int i2c_hid_parse(struct hid_device *hid)
mutex_lock(&ihid->reset_lock);
do {
ret = i2c_hid_start_hwreset(ihid);
- if (ret)
+ if (ret == 0)
+ ret = i2c_hid_finish_hwreset(ihid);
+ else
msleep(1000);
} while (tries-- > 0 && ret);
+ mutex_unlock(&ihid->reset_lock);
if (ret)
- goto abort_reset;
+ return ret;
use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
&rsize);
@@ -750,11 +744,8 @@ static int i2c_hid_parse(struct hid_device *hid)
i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
} else {
rdesc = kzalloc(rsize, GFP_KERNEL);
-
- if (!rdesc) {
- ret = -ENOMEM;
- goto abort_reset;
- }
+ if (!rdesc)
+ return -ENOMEM;
i2c_hid_dbg(ihid, "asking HID report descriptor\n");
@@ -763,23 +754,10 @@ static int i2c_hid_parse(struct hid_device *hid)
rdesc, rsize);
if (ret) {
hid_err(hid, "reading report descriptor failed\n");
- goto abort_reset;
+ goto out;
}
}
- /*
- * Windows directly reads the report-descriptor after sending reset
- * and then waits for resets completion afterwards. Some touchpads
- * actually wait for the report-descriptor to be read before signalling
- * reset completion.
- */
- ret = i2c_hid_finish_hwreset(ihid);
-abort_reset:
- clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
- mutex_unlock(&ihid->reset_lock);
- if (ret)
- goto out;
-
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize);
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index a49c6affd7c4..dd5fc60874ba 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
+ dev->devc = &pdev->dev;
ishtp_device_init(dev);
init_waitqueue_head(&dev->wait_hw_ready);
@@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
}
dev->ops = &ish_hw_ops;
- dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
return dev;
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index adbf674355b2..fb8cd8469328 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -153,7 +153,9 @@ void vmbus_free_ring(struct vmbus_channel *channel)
hv_ringbuffer_cleanup(&channel->inbound);
if (channel->ringbuffer_page) {
- __free_pages(channel->ringbuffer_page,
+ /* In a CoCo VM leak the memory if it didn't get re-encrypted */
+ if (!channel->ringbuffer_gpadlhandle.decrypted)
+ __free_pages(channel->ringbuffer_page,
get_order(channel->ringbuffer_pagecount
<< PAGE_SHIFT));
channel->ringbuffer_page = NULL;
@@ -436,9 +438,18 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
- if (ret)
+ if (ret) {
+ gpadl->decrypted = false;
return ret;
+ }
+ /*
+ * Set the "decrypted" flag to true for the set_memory_decrypted()
+ * success case. In the failure case, the encryption state of the
+ * memory is unknown. Leave "decrypted" as true to ensure the
+ * memory will be leaked instead of going back on the free list.
+ */
+ gpadl->decrypted = true;
ret = set_memory_decrypted((unsigned long)kbuffer,
PFN_UP(size));
if (ret) {
@@ -527,9 +538,15 @@ cleanup:
kfree(msginfo);
- if (ret)
- set_memory_encrypted((unsigned long)kbuffer,
- PFN_UP(size));
+ if (ret) {
+ /*
+ * If set_memory_encrypted() fails, the decrypted flag is
+ * left as true so the memory is leaked instead of being
+ * put back on the free list.
+ */
+ if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
+ gpadl->decrypted = false;
+ }
return ret;
}
@@ -850,6 +867,8 @@ post_msg_err:
if (ret)
pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
+ gpadl->decrypted = ret;
+
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
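
The common thread in these hunks: in a confidential (CoCo) VM, pages shared with the host are marked decrypted, and if re-encryption fails on teardown their state is unknown, so freeing them could hand decrypted memory back to the allocator. Hence the tracked "decrypted" flag and the deliberate leak. A condensed sketch of the rule, using a hypothetical wrapper:

/* Sketch: free a host-shared buffer only if it is provably private again. */
static void shared_pages_free(struct page *pages, unsigned int order,
			      bool decrypted)
{
	unsigned long addr = (unsigned long)page_address(pages);

	if (decrypted && set_memory_encrypted(addr, 1 << order))
		return;		/* state unknown: leak rather than reuse */

	__free_pages(pages, order);
}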
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 3cabeeabb1ca..f001ae880e1d 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -237,8 +237,17 @@ int vmbus_connect(void)
vmbus_connection.monitor_pages[0], 1);
ret |= set_memory_decrypted((unsigned long)
vmbus_connection.monitor_pages[1], 1);
- if (ret)
+ if (ret) {
+ /*
+ * If set_memory_decrypted() fails, the encryption state
+ * of the memory is unknown. So leak the memory instead
+ * of risking returning decrypted memory to the free list.
+ * For simplicity, always handle both pages the same.
+ */
+ vmbus_connection.monitor_pages[0] = NULL;
+ vmbus_connection.monitor_pages[1] = NULL;
goto cleanup;
+ }
/*
* Set_memory_decrypted() will change the memory contents if
@@ -337,13 +346,19 @@ void vmbus_disconnect(void)
vmbus_connection.int_page = NULL;
}
- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
- set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1);
+ if (vmbus_connection.monitor_pages[0]) {
+ if (!set_memory_encrypted(
+ (unsigned long)vmbus_connection.monitor_pages[0], 1))
+ hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+ vmbus_connection.monitor_pages[0] = NULL;
+ }
- hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
- hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
- vmbus_connection.monitor_pages[0] = NULL;
- vmbus_connection.monitor_pages[1] = NULL;
+ if (vmbus_connection.monitor_pages[1]) {
+ if (!set_memory_encrypted(
+ (unsigned long)vmbus_connection.monitor_pages[1], 1))
+ hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+ vmbus_connection.monitor_pages[1] = NULL;
+ }
}
/*
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4cb17603a828..12a707ab73f8 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -131,7 +131,7 @@ static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);
@@ -142,7 +142,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->state);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);
@@ -153,7 +153,7 @@ static ssize_t monitor_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
+ return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);
@@ -164,8 +164,8 @@ static ssize_t class_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "{%pUl}\n",
- &hv_dev->channel->offermsg.offer.if_type);
+ return sysfs_emit(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);
@@ -176,8 +176,8 @@ static ssize_t device_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "{%pUl}\n",
- &hv_dev->channel->offermsg.offer.if_instance);
+ return sysfs_emit(buf, "{%pUl}\n",
+ &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);
@@ -186,7 +186,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
+ return sysfs_emit(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
@@ -199,7 +199,7 @@ static ssize_t numa_node_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
+ return sysfs_emit(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
@@ -212,9 +212,8 @@ static ssize_t server_monitor_pending_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
@@ -226,9 +225,8 @@ static ssize_t client_monitor_pending_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_pending(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);
@@ -240,9 +238,8 @@ static ssize_t server_monitor_latency_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_latency(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);
@@ -254,9 +251,8 @@ static ssize_t client_monitor_latency_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_latency(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);
@@ -268,9 +264,8 @@ static ssize_t server_monitor_conn_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_conn_id(hv_dev->channel,
- vmbus_connection.monitor_pages[0]));
+ return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);
@@ -282,9 +277,8 @@ static ssize_t client_monitor_conn_id_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n",
- channel_conn_id(hv_dev->channel,
- vmbus_connection.monitor_pages[1]));
+ return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+ vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
@@ -303,7 +297,7 @@ static ssize_t out_intr_mask_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+ return sysfs_emit(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);
@@ -321,7 +315,7 @@ static ssize_t out_read_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_read_index);
+ return sysfs_emit(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
@@ -340,7 +334,7 @@ static ssize_t out_write_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.current_write_index);
+ return sysfs_emit(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
@@ -359,7 +353,7 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+ return sysfs_emit(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -378,7 +372,7 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+ return sysfs_emit(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -396,7 +390,7 @@ static ssize_t in_intr_mask_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+ return sysfs_emit(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);
@@ -414,7 +408,7 @@ static ssize_t in_read_index_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_read_index);
+ return sysfs_emit(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);
@@ -432,7 +426,7 @@ static ssize_t in_write_index_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.current_write_index);
+ return sysfs_emit(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);
@@ -451,7 +445,7 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+ return sysfs_emit(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -470,7 +464,7 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+ return sysfs_emit(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
@@ -480,7 +474,7 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
- int buf_size = PAGE_SIZE, n_written, tot_written;
+ int n_written;
struct list_head *cur;
if (!channel)
@@ -488,25 +482,21 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
mutex_lock(&vmbus_connection.channel_mutex);
- tot_written = snprintf(buf, buf_size, "%u:%u\n",
- channel->offermsg.child_relid, channel->target_cpu);
+ n_written = sysfs_emit(buf, "%u:%u\n",
+ channel->offermsg.child_relid,
+ channel->target_cpu);
list_for_each(cur, &channel->sc_list) {
- if (tot_written >= buf_size - 1)
- break;
cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
- n_written = scnprintf(buf + tot_written,
- buf_size - tot_written,
- "%u:%u\n",
- cur_sc->offermsg.child_relid,
- cur_sc->target_cpu);
- tot_written += n_written;
+ n_written += sysfs_emit_at(buf, n_written, "%u:%u\n",
+ cur_sc->offermsg.child_relid,
+ cur_sc->target_cpu);
}
mutex_unlock(&vmbus_connection.channel_mutex);
- return tot_written;
+ return n_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
@@ -516,7 +506,7 @@ static ssize_t vendor_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+ return sysfs_emit(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);
@@ -526,7 +516,7 @@ static ssize_t device_show(struct device *dev,
{
struct hv_device *hv_dev = device_to_hv_device(dev);
- return sprintf(buf, "0x%x\n", hv_dev->device_id);
+ return sysfs_emit(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
@@ -551,7 +541,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
+ len = sysfs_emit(buf, "%s\n", hv_dev->driver_override);
device_unlock(dev);
return len;
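
sysfs_emit() and sysfs_emit_at() are the preferred way to fill sysfs show() buffers: they know the buffer is a full page, validate the offset, and clamp the write, which is why the hand-rolled buf_size bookkeeping above could be dropped. A sketch of a multi-line attribute built with the offset variant, with a hypothetical item list:

static ssize_t items_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	int i, len = 0;

	for (i = 0; i < 3; i++)
		len += sysfs_emit_at(buf, len, "item%d\n", i);

	return len;
}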
diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
index a284a02839fb..3e63666a61bd 100644
--- a/drivers/hwmon/corsair-cpro.c
+++ b/drivers/hwmon/corsair-cpro.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#define USB_VENDOR_ID_CORSAIR 0x1b1c
@@ -77,8 +78,11 @@
struct ccp_device {
struct hid_device *hdev;
struct device *hwmon_dev;
+ /* For reinitializing the completion below */
+ spinlock_t wait_input_report_lock;
struct completion wait_input_report;
struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */
+ u8 *cmd_buffer;
u8 *buffer;
int target[6];
DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
@@ -111,15 +115,23 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2,
unsigned long t;
int ret;
- memset(ccp->buffer, 0x00, OUT_BUFFER_SIZE);
- ccp->buffer[0] = command;
- ccp->buffer[1] = byte1;
- ccp->buffer[2] = byte2;
- ccp->buffer[3] = byte3;
-
+ memset(ccp->cmd_buffer, 0x00, OUT_BUFFER_SIZE);
+ ccp->cmd_buffer[0] = command;
+ ccp->cmd_buffer[1] = byte1;
+ ccp->cmd_buffer[2] = byte2;
+ ccp->cmd_buffer[3] = byte3;
+
+ /*
+ * Disable raw event parsing for a moment to safely reinitialize the
+ * completion. Reinit is done because hidraw could have triggered
+ * the raw event parsing and marked the ccp->wait_input_report
+ * completion as done.
+ */
+ spin_lock_bh(&ccp->wait_input_report_lock);
reinit_completion(&ccp->wait_input_report);
+ spin_unlock_bh(&ccp->wait_input_report_lock);
- ret = hid_hw_output_report(ccp->hdev, ccp->buffer, OUT_BUFFER_SIZE);
+ ret = hid_hw_output_report(ccp->hdev, ccp->cmd_buffer, OUT_BUFFER_SIZE);
if (ret < 0)
return ret;
@@ -135,11 +147,12 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8
struct ccp_device *ccp = hid_get_drvdata(hdev);
/* only copy buffer when requested */
- if (completion_done(&ccp->wait_input_report))
- return 0;
-
- memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
- complete(&ccp->wait_input_report);
+ spin_lock(&ccp->wait_input_report_lock);
+ if (!completion_done(&ccp->wait_input_report)) {
+ memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
+ complete_all(&ccp->wait_input_report);
+ }
+ spin_unlock(&ccp->wait_input_report_lock);
return 0;
}
@@ -492,7 +505,11 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!ccp)
return -ENOMEM;
- ccp->buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL);
+ ccp->cmd_buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL);
+ if (!ccp->cmd_buffer)
+ return -ENOMEM;
+
+ ccp->buffer = devm_kmalloc(&hdev->dev, IN_BUFFER_SIZE, GFP_KERNEL);
if (!ccp->buffer)
return -ENOMEM;
@@ -510,7 +527,9 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
ccp->hdev = hdev;
hid_set_drvdata(hdev, ccp);
+
mutex_init(&ccp->mutex);
+ spin_lock_init(&ccp->wait_input_report_lock);
init_completion(&ccp->wait_input_report);
hid_device_io_start(hdev);
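
The race being fixed here: hidraw traffic can mark wait_input_report done between a command's reinit_completion() and the device's actual reply, so a stale report satisfies the wait. Holding a spinlock around both the reinit and the completion side closes the window; the _bh variant on the sleeping side pairs with plain spin_lock() in the raw-event handler, which runs in softirq context. Distilled to the two sides:

/* Requester (process context): no event may complete across the reinit. */
spin_lock_bh(&ccp->wait_input_report_lock);
reinit_completion(&ccp->wait_input_report);
spin_unlock_bh(&ccp->wait_input_report_lock);
/* ...send the command, then wait_for_completion_timeout()... */

/* Raw-event handler (softirq): copy only if a request is waiting. */
spin_lock(&ccp->wait_input_report_lock);
if (!completion_done(&ccp->wait_input_report)) {
	memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
	complete_all(&ccp->wait_input_report);
}
spin_unlock(&ccp->wait_input_report_lock);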
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 8d9d422450e5..d817c719b90b 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -80,11 +80,11 @@ struct ucd9000_debugfs_entry {
* It has been observed that the UCD90320 randomly fails register access when
* doing another access right on the back of a register write. To mitigate this
* make sure that there is a minimum delay between a write access and the
- * following access. The 250us is based on experimental data. At a delay of
- * 200us the issue seems to go away. Add a bit of extra margin to allow for
+ * following access. The 500us is based on experimental data. At a delay of
+ * 350us the issue seems to go away. Add a bit of extra margin to allow for
* system to system differences.
*/
-#define UCD90320_WAIT_DELAY_US 250
+#define UCD90320_WAIT_DELAY_US 500
static inline void ucd90320_wait(const struct ucd9000_data *data)
{
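
The body of ucd90320_wait() is not in this hunk; the usual shape of such a guard is to stamp each access and sleep only for the remaining gap. A sketch under that assumption, with the stored timestamp as a hypothetical field:

/* Sketch (fields assumed): wait out the remainder of the minimum gap. */
static void min_gap_wait(ktime_t last_access, unsigned int min_us)
{
	s64 delta = ktime_us_delta(ktime_get(), last_access);

	if (delta < min_us)
		udelay(min_us - delta);
}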
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index ff5c486a1dbb..db0d1ac82910 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2200,13 +2200,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
- * takes place. adap->algo->master_xfer existence isn't checked.
+ * takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
+ if (!adap->algo->master_xfer) {
+ dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@@ -2273,11 +2278,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
- if (!adap->algo->master_xfer) {
- dev_dbg(&adap->dev, "I2C level transfers not supported\n");
- return -EOPNOTSUPP;
- }
-
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 61839be501c2..63c3566a533b 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -5,6 +5,7 @@
* Copyright (c) 2014, Intel Corporation.
*/
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -27,11 +28,16 @@
#define MXC4005_REG_ZOUT_UPPER 0x07
#define MXC4005_REG_ZOUT_LOWER 0x08
+#define MXC4005_REG_INT_MASK0 0x0A
+
#define MXC4005_REG_INT_MASK1 0x0B
#define MXC4005_REG_INT_MASK1_BIT_DRDYE 0x01
+#define MXC4005_REG_INT_CLR0 0x00
+
#define MXC4005_REG_INT_CLR1 0x01
#define MXC4005_REG_INT_CLR1_BIT_DRDYC 0x01
+#define MXC4005_REG_INT_CLR1_SW_RST 0x10
#define MXC4005_REG_CONTROL 0x0D
#define MXC4005_REG_CONTROL_MASK_FSR GENMASK(6, 5)
@@ -39,6 +45,9 @@
#define MXC4005_REG_DEVICE_ID 0x0E
+/* Datasheet does not specify a reset time, this is a conservative guess */
+#define MXC4005_RESET_TIME_US 2000
+
enum mxc4005_axis {
AXIS_X,
AXIS_Y,
@@ -62,6 +71,8 @@ struct mxc4005_data {
s64 timestamp __aligned(8);
} scan;
bool trigger_enabled;
+ unsigned int control;
+ unsigned int int_mask1;
};
/*
@@ -113,7 +124,9 @@ static bool mxc4005_is_readable_reg(struct device *dev, unsigned int reg)
static bool mxc4005_is_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case MXC4005_REG_INT_CLR0:
case MXC4005_REG_INT_CLR1:
+ case MXC4005_REG_INT_MASK0:
case MXC4005_REG_INT_MASK1:
case MXC4005_REG_CONTROL:
return true;
@@ -330,23 +343,20 @@ static int mxc4005_set_trigger_state(struct iio_trigger *trig,
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct mxc4005_data *data = iio_priv(indio_dev);
+ unsigned int val;
int ret;
mutex_lock(&data->mutex);
- if (state) {
- ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1,
- MXC4005_REG_INT_MASK1_BIT_DRDYE);
- } else {
- ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1,
- ~MXC4005_REG_INT_MASK1_BIT_DRDYE);
- }
+ val = state ? MXC4005_REG_INT_MASK1_BIT_DRDYE : 0;
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, val);
if (ret < 0) {
mutex_unlock(&data->mutex);
dev_err(data->dev, "failed to update reg_int_mask1");
return ret;
}
+ data->int_mask1 = val;
data->trigger_enabled = state;
mutex_unlock(&data->mutex);
@@ -382,6 +392,21 @@ static int mxc4005_chip_init(struct mxc4005_data *data)
dev_dbg(data->dev, "MXC4005 chip id %02x\n", reg);
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1,
+ MXC4005_REG_INT_CLR1_SW_RST);
+ if (ret < 0)
+ return dev_err_probe(data->dev, ret, "resetting chip\n");
+
+ fsleep(MXC4005_RESET_TIME_US);
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0);
+ if (ret < 0)
+ return dev_err_probe(data->dev, ret, "writing INT_MASK0\n");
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, 0);
+ if (ret < 0)
+ return dev_err_probe(data->dev, ret, "writing INT_MASK1\n");
+
return 0;
}
@@ -469,6 +494,58 @@ static int mxc4005_probe(struct i2c_client *client)
return devm_iio_device_register(&client->dev, indio_dev);
}
+static int mxc4005_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mxc4005_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Save control to restore it on resume */
+ ret = regmap_read(data->regmap, MXC4005_REG_CONTROL, &data->control);
+ if (ret < 0)
+ dev_err(data->dev, "failed to read reg_control\n");
+
+ return ret;
+}
+
+static int mxc4005_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mxc4005_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1,
+ MXC4005_REG_INT_CLR1_SW_RST);
+ if (ret) {
+ dev_err(data->dev, "failed to reset chip: %d\n", ret);
+ return ret;
+ }
+
+ fsleep(MXC4005_RESET_TIME_US);
+
+ ret = regmap_write(data->regmap, MXC4005_REG_CONTROL, data->control);
+ if (ret) {
+ dev_err(data->dev, "failed to restore control register\n");
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0);
+ if (ret) {
+ dev_err(data->dev, "failed to restore interrupt 0 mask\n");
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, data->int_mask1);
+ if (ret) {
+ dev_err(data->dev, "failed to restore interrupt 1 mask\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(mxc4005_pm_ops, mxc4005_suspend, mxc4005_resume);
+
static const struct acpi_device_id mxc4005_acpi_match[] = {
{"MXC4005", 0},
{"MXC6655", 0},
@@ -496,6 +573,7 @@ static struct i2c_driver mxc4005_driver = {
.name = MXC4005_DRV_NAME,
.acpi_match_table = mxc4005_acpi_match,
.of_match_table = mxc4005_of_match,
+ .pm = pm_sleep_ptr(&mxc4005_pm_ops),
},
.probe = mxc4005_probe,
.id_table = mxc4005_id,
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 01f55cc902fa..060a21c70460 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -1289,6 +1289,7 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
struct device *dev = &st->adis.spi->dev;
const struct adis16475_sync *sync;
u32 sync_mode;
+ u16 val;
/* default to internal clk */
st->clk_freq = st->info->int_clk * 1000;
@@ -1350,8 +1351,9 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
* I'm keeping this for simplicity and avoiding extra variables
* in chip_info.
*/
+ val = ADIS16475_SYNC_MODE(sync->sync_mode);
ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
- ADIS16475_SYNC_MODE_MASK, sync->sync_mode);
+ ADIS16475_SYNC_MODE_MASK, val);
if (ret)
return ret;
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index fe8734468ed3..62e9e93d915d 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1233,6 +1233,7 @@ const struct bmp280_chip_info bmp380_chip_info = {
.chip_id = bmp380_chip_ids,
.num_chip_id = ARRAY_SIZE(bmp380_chip_ids),
.regmap_config = &bmp380_regmap_config,
+ .spi_read_extra_byte = true,
.start_up_time = 2000,
.channels = bmp380_channels,
.num_channels = 2,
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index a444d4b2978b..4e19ea0b4d39 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -96,15 +96,10 @@ static int bmp280_spi_probe(struct spi_device *spi)
chip_info = spi_get_device_match_data(spi);
- switch (chip_info->chip_id[0]) {
- case BMP380_CHIP_ID:
- case BMP390_CHIP_ID:
+ if (chip_info->spi_read_extra_byte)
bmp_regmap_bus = &bmp380_regmap_bus;
- break;
- default:
+ else
bmp_regmap_bus = &bmp280_regmap_bus;
- break;
- }
regmap = devm_regmap_init(&spi->dev,
bmp_regmap_bus,
@@ -127,7 +122,7 @@ static const struct of_device_id bmp280_of_spi_match[] = {
{ .compatible = "bosch,bmp180", .data = &bmp180_chip_info },
{ .compatible = "bosch,bmp181", .data = &bmp180_chip_info },
{ .compatible = "bosch,bmp280", .data = &bmp280_chip_info },
- { .compatible = "bosch,bme280", .data = &bmp280_chip_info },
+ { .compatible = "bosch,bme280", .data = &bme280_chip_info },
{ .compatible = "bosch,bmp380", .data = &bmp380_chip_info },
{ .compatible = "bosch,bmp580", .data = &bmp580_chip_info },
{ },
@@ -139,7 +134,7 @@ static const struct spi_device_id bmp280_spi_id[] = {
{ "bmp180", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp181", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp280", (kernel_ulong_t)&bmp280_chip_info },
- { "bme280", (kernel_ulong_t)&bmp280_chip_info },
+ { "bme280", (kernel_ulong_t)&bme280_chip_info },
{ "bmp380", (kernel_ulong_t)&bmp380_chip_info },
{ "bmp580", (kernel_ulong_t)&bmp580_chip_info },
{ }
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 4012387d7956..5812a344ed8e 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -423,6 +423,7 @@ struct bmp280_chip_info {
int num_chip_id;
const struct regmap_config *regmap_config;
+ bool spi_read_extra_byte;
const struct iio_chan_spec *channels;
int num_channels;
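
Keying the SPI quirk off a spi_read_extra_byte capability in chip_info, rather than a chip-ID switch in probe, means future parts with the same behavior need only a data change. The selection then collapses to one expression, as in the probe hunk above:

/* Sketch: bus quirks live in per-chip data, not in probe logic. */
const struct regmap_bus *bus = chip_info->spi_read_extra_byte ?
			       &bmp380_regmap_bus : &bmp280_regmap_bus;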
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index bf0df6ee4f78..07fb8d3c037f 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
-static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+ enum ib_cm_state old_state)
{
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
- cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
+ enum ib_cm_state old_state;
struct cm_work *work;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
+ old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
@@ -1151,7 +1154,7 @@ retest:
msecs_to_jiffies(
CM_DESTROY_ID_WAIT_TIMEOUT));
if (!ret) /* timeout happened */
- cm_destroy_id_wait_timeout(cm_id);
+ cm_destroy_id_wait_timeout(cm_id, old_state);
} while (!ret);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 0c3c4e64812c..3e43687a7f6f 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
- if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+ !mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 455e966eeff3..b27791029fa9 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -439,6 +439,7 @@ static int remove_device_files(struct super_block *sb,
return PTR_ERR(dir);
}
simple_recursive_removal(dir, NULL);
+ dput(dir);
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index ae466e72fc43..255677bc12b2 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
+
+ mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index f50848ed5575..6fadaddb2b90 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -208,6 +208,7 @@ static const struct xpad_device {
{ 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
+ { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
@@ -487,6 +488,7 @@ static const struct usb_device_id xpad_table[] = {
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz Gamepad */
+ XPAD_XBOXONE_VENDOR(0x0b05), /* ASUS controllers */
XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f Xbox One controllers */
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 3c9bbd04e143..5b9be2957746 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -127,7 +127,6 @@ MODULE_DEVICE_TABLE(acpi, atlas_device_ids);
static struct acpi_driver atlas_acpi_driver = {
.name = ACPI_ATLAS_NAME,
.class = ACPI_ATLAS_CLASS,
- .owner = THIS_MODULE,
.ids = atlas_device_ids,
.ops = {
.add = atlas_acpi_button_add,
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index cda0c3ff5a28..2fbbaeb76d70 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -132,7 +132,13 @@ static void __exit amimouse_remove(struct platform_device *pdev)
input_unregister_device(dev);
}
-static struct platform_driver amimouse_driver = {
+/*
+ * amimouse_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver amimouse_driver __refdata = {
.remove_new = __exit_p(amimouse_remove),
.driver = {
.name = "amiga-mouse",
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index 64590b86eb37..a8f4b2d70e59 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -15,10 +15,7 @@
* IRQs.
*/
-#ifdef __alpha__
-# define I8042_KBD_IRQ 1
-# define I8042_AUX_IRQ (RTC_PORT(0) == 0x170 ? 9 : 12) /* Jensen is special */
-#elif defined(__arm__)
+#if defined(__arm__)
/* defined in include/asm-arm/arch-xxx/irqs.h */
#include <asm/irq.h>
#elif defined(CONFIG_PPC)
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 5d1010cafed8..7e9b996b47c8 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
path->num_nodes = num_nodes;
+ mutex_lock(&icc_bw_lock);
+
for (i = num_nodes - 1; i >= 0; i--) {
node->provider->users++;
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
node = node->reverse;
}
+ mutex_unlock(&icc_bw_lock);
+
return path;
}
@@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
pr_err("%s: error (%d)\n", __func__, ret);
mutex_lock(&icc_lock);
+ mutex_lock(&icc_bw_lock);
+
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users))
node->provider->users--;
}
+
+ mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
kfree_const(path->name);
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index 99824675ee3f..654abb9ce08e 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -116,15 +116,6 @@ static struct qcom_icc_node xm_sdc2 = {
.links = { X1E80100_SLAVE_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_master = {
- .name = "ddr_perf_mode_master",
- .id = X1E80100_MASTER_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 1,
- .links = { X1E80100_SLAVE_DDR_PERF_MODE },
-};
-
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = X1E80100_MASTER_QUP_CORE_0,
@@ -688,14 +679,6 @@ static struct qcom_icc_node qns_a2noc_snoc = {
.links = { X1E80100_MASTER_A2NOC_SNOC },
};
-static struct qcom_icc_node ddr_perf_mode_slave = {
- .name = "ddr_perf_mode_slave",
- .id = X1E80100_SLAVE_DDR_PERF_MODE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 0,
-};
-
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = X1E80100_SLAVE_QUP_CORE_0,
@@ -1377,12 +1360,6 @@ static struct qcom_icc_bcm bcm_acv = {
.nodes = { &ebi },
};
-static struct qcom_icc_bcm bcm_acv_perf = {
- .name = "ACV_PERF",
- .num_nodes = 1,
- .nodes = { &ddr_perf_mode_slave },
-};
-
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
@@ -1583,18 +1560,15 @@ static const struct qcom_icc_desc x1e80100_aggre2_noc = {
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
- &bcm_acv_perf,
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
- [MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
- [SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 33228c1c8980..ac6754a85f35 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -3232,28 +3232,29 @@ static void iommu_snp_enable(void)
return;
/*
* The SNP support requires that IOMMU must be enabled, and is
- * not configured in the passthrough mode.
+ * configured with V1 page table (DTE[Mode] = 0 is not supported).
*/
if (no_iommu || iommu_default_passthrough()) {
- pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
- cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
- return;
+ pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
+ goto disable_snp;
}
amd_iommu_snp_en = check_feature(FEATURE_SNP);
if (!amd_iommu_snp_en) {
- pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
- cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
- return;
+ pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+ goto disable_snp;
}
pr_info("IOMMU SNP support enabled.\n");
+ return;
- /* Enforce IOMMU v1 pagetable when SNP is enabled. */
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
- pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
- }
+disable_snp:
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
#endif
}
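Aside: the restructuring above replaces duplicated cc_platform_clear() calls with a single exit label. A condensed, hypothetical shape of the pattern -- the boolean parameters stand in for the real checks, only the pr_info() string and the clear call come from the hunk:

static void example_snp_enable(bool iommu_on, bool passthrough, bool v1_pgtbl)
{
	if (!iommu_on || passthrough)
		goto disable_snp;       /* every failed precondition ... */

	if (!v1_pgtbl)
		goto disable_snp;       /* ... funnels through one label */

	pr_info("IOMMU SNP support enabled.\n");
	return;

disable_snp:
	cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
}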
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index d35c1b8c8e65..fb727f5b0b82 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1692,26 +1692,29 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
static u16 domain_id_alloc(void)
{
+ unsigned long flags;
int id;
- spin_lock(&pd_bitmap_lock);
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
BUG_ON(id == 0);
if (id > 0 && id < MAX_DOMAIN_ID)
__set_bit(id, amd_iommu_pd_alloc_bitmap);
else
id = 0;
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
return id;
}
static void domain_id_free(int id)
{
- spin_lock(&pd_bitmap_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd_bitmap_lock, flags);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
- spin_unlock(&pd_bitmap_lock);
+ spin_unlock_irqrestore(&pd_bitmap_lock, flags);
}
static void free_gcr3_tbl_level1(u64 *tbl)
@@ -2751,6 +2754,10 @@ static int amd_iommu_def_domain_type(struct device *dev)
if (!dev_data)
return 0;
+ /* Always use a DMA domain for untrusted devices */
+ if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
+ return IOMMU_DOMAIN_DMA;
+
/*
* Do not identity map IOMMUv2 capable devices when:
* - memory encryption is active, because some of those devices
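Aside: a sketch of why this hunk moves domain_id_alloc()/domain_id_free() to the irqsave lock variants. If the lock can be taken from a context that already runs with interrupts disabled, a plain spin_unlock() would re-enable them unconditionally; irqsave/irqrestore preserves the caller's IRQ state. The lock and bitmap below are illustrative:

static DEFINE_SPINLOCK(example_bitmap_lock);
static DECLARE_BITMAP(example_ids, 64);

static int example_id_alloc(void)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&example_bitmap_lock, flags);   /* saves IRQ state */
	id = find_first_zero_bit(example_ids, 64);
	if (id < 64)
		__set_bit(id, example_ids);
	else
		id = -1;
	spin_unlock_irqrestore(&example_bitmap_lock, flags); /* restores it */

	return id;
}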
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 87bf522b9d2e..957d988b6d83 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -221,11 +221,9 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
unsigned int inst;
irqreturn_t ret = IRQ_NONE;
struct arm_smmu_device *smmu;
- struct iommu_domain *domain = dev;
- struct arm_smmu_domain *smmu_domain;
+ struct arm_smmu_domain *smmu_domain = dev;
struct nvidia_smmu *nvidia;
- smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
smmu = smmu_domain->smmu;
nvidia = to_nvidia_smmu(smmu);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 50eb9aed47cc..a7ecd90303dc 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4299,9 +4299,11 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
- ret = device_rbtree_insert(iommu, info);
- if (ret)
- goto free;
+ if (pdev && pci_ats_supported(pdev)) {
+ ret = device_rbtree_insert(iommu, info);
+ if (ret)
+ goto free;
+ }
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
@@ -4336,7 +4338,8 @@ static void intel_iommu_release_device(struct device *dev)
struct intel_iommu *iommu = info->iommu;
mutex_lock(&iommu->iopf_lock);
- device_rbtree_remove(info);
+ if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
+ device_rbtree_remove(info);
mutex_unlock(&iommu->iopf_lock);
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
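Aside: the probe and release hunks above must gate the rbtree insert and remove on the same condition, or a device could be removed that was never inserted (or left behind on release). A hypothetical helper makes the shared predicate explicit; both paths would call it:

/* Hypothetical helper; not in the patch itself. */
static bool example_needs_dev_rbtree(struct device *dev)
{
	return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
}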
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 566297bc87dd..712ebfc9870c 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -19,6 +19,7 @@
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
+#include <asm/posted_intr.h>
#include "iommu.h"
#include "../irq_remapping.h"
@@ -49,6 +50,7 @@ struct irq_2_iommu {
u16 sub_handle;
u8 irte_mask;
enum irq_mode mode;
+ bool posted_msi;
};
struct intel_ir_data {
@@ -1118,6 +1120,14 @@ static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
irte->redir_hint = 1;
}
+static void prepare_irte_posted(struct irte *irte)
+{
+ memset(irte, 0, sizeof(*irte));
+
+ irte->present = 1;
+ irte->p_pst = 1;
+}
+
struct irq_remap_ops intel_irq_remap_ops = {
.prepare = intel_prepare_irq_remapping,
.enable = intel_enable_irq_remapping,
@@ -1126,6 +1136,47 @@ struct irq_remap_ops intel_irq_remap_ops = {
.enable_faulting = enable_drhd_fault_handling,
};
+#ifdef CONFIG_X86_POSTED_MSI
+
+static phys_addr_t get_pi_desc_addr(struct irq_data *irqd)
+{
+ int cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
+
+ if (WARN_ON(cpu >= nr_cpu_ids))
+ return 0;
+
+ return __pa(per_cpu_ptr(&posted_msi_pi_desc, cpu));
+}
+
+static void intel_ir_reconfigure_irte_posted(struct irq_data *irqd)
+{
+ struct intel_ir_data *ir_data = irqd->chip_data;
+ struct irte *irte = &ir_data->irte_entry;
+ struct irte irte_pi;
+ u64 pid_addr;
+
+ pid_addr = get_pi_desc_addr(irqd);
+
+ if (!pid_addr) {
+ pr_warn("Failed to setup IRQ %d for posted mode", irqd->irq);
+ return;
+ }
+
+ memset(&irte_pi, 0, sizeof(irte_pi));
+
+ /* The shared IRTE has already been set up as posted during alloc_irte */
+ dmar_copy_shared_irte(&irte_pi, irte);
+
+ irte_pi.pda_l = (pid_addr >> (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
+ irte_pi.pda_h = (pid_addr >> 32) & ~(-1UL << PDA_HIGH_BIT);
+
+ modify_irte(&ir_data->irq_2_iommu, &irte_pi);
+}
+
+#else
+static inline void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) {}
+#endif
+
static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
{
struct intel_ir_data *ir_data = irqd->chip_data;
@@ -1139,8 +1190,9 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
irte->vector = cfg->vector;
irte->dest_id = IRTE_DEST(cfg->dest_apicid);
- /* Update the hardware only if the interrupt is in remapped mode. */
- if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+ if (ir_data->irq_2_iommu.posted_msi)
+ intel_ir_reconfigure_irte_posted(irqd);
+ else if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
modify_irte(&ir_data->irq_2_iommu, irte);
}
@@ -1194,7 +1246,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
struct intel_ir_data *ir_data = data->chip_data;
struct vcpu_data *vcpu_pi_info = info;
- /* stop posting interrupts, back to remapping mode */
+ /* stop posting interrupts, back to the default mode */
if (!vcpu_pi_info) {
modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
} else {
@@ -1233,6 +1285,49 @@ static struct irq_chip intel_ir_chip = {
.irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
};
+/*
+ * With posted MSIs, all vectors are multiplexed into a single notification
+ * vector. Device MSIs are then dispatched in a demux loop where
+ * EOIs can be coalesced as well.
+ *
+ * "INTEL-IR-POST" IRQ chip does not do EOI on ACK, thus the dummy irq_ack()
+ * function. Instead EOI is performed by the posted interrupt notification
+ * handler.
+ *
+ * For the example below, 3 MSIs are coalesced into one CPU notification. Only
+ * one apic_eoi() is needed.
+ *
+ * __sysvec_posted_msi_notification()
+ * irq_enter();
+ * handle_edge_irq()
+ * irq_chip_ack_parent()
+ * dummy(); // No EOI
+ * handle_irq_event()
+ * driver_handler()
+ * handle_edge_irq()
+ * irq_chip_ack_parent()
+ * dummy(); // No EOI
+ * handle_irq_event()
+ * driver_handler()
+ * handle_edge_irq()
+ * irq_chip_ack_parent()
+ * dummy(); // No EOI
+ * handle_irq_event()
+ * driver_handler()
+ * apic_eoi()
+ * irq_exit()
+ */
+
+static void dummy_ack(struct irq_data *d) { }
+
+static struct irq_chip intel_ir_chip_post_msi = {
+ .name = "INTEL-IR-POST",
+ .irq_ack = dummy_ack,
+ .irq_set_affinity = intel_ir_set_affinity,
+ .irq_compose_msi_msg = intel_ir_compose_msi_msg,
+ .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
+};
+
static void fill_msi_msg(struct msi_msg *msg, u32 index, u32 subhandle)
{
memset(msg, 0, sizeof(*msg));
@@ -1274,6 +1369,11 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
break;
case X86_IRQ_ALLOC_TYPE_PCI_MSI:
case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
+ if (posted_msi_supported()) {
+ prepare_irte_posted(irte);
+ data->irq_2_iommu.posted_msi = 1;
+ }
+
set_msi_sid(irte,
pci_real_dma_dev(msi_desc_to_pci_dev(info->desc)));
break;
@@ -1361,7 +1461,12 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
irq_data->hwirq = (index << 16) + i;
irq_data->chip_data = ird;
- irq_data->chip = &intel_ir_chip;
+ if (posted_msi_supported() &&
+ ((info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) ||
+ (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX)))
+ irq_data->chip = &intel_ir_chip_post_msi;
+ else
+ irq_data->chip = &intel_ir_chip;
intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
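Aside: the pda_l/pda_h arithmetic in intel_ir_reconfigure_irte_posted() splits the posted-interrupt descriptor's physical address across two IRTE fields. Below is a standalone sketch of the same bit math; the field widths are assumed to match the IRTE definition (PDA_LOW_BIT = 26, PDA_HIGH_BIT = 32) and the example address is made up:

#include <stdint.h>
#include <stdio.h>

#define PDA_LOW_BIT   26
#define PDA_HIGH_BIT  32

int main(void)
{
	uint64_t pid_addr = 0x0000001234567000ULL;  /* 64-byte aligned PID */
	uint64_t pda_l = (pid_addr >> (32 - PDA_LOW_BIT)) & ~(-1ULL << PDA_LOW_BIT);
	uint64_t pda_h = (pid_addr >> 32) & ~(-1ULL << PDA_HIGH_BIT);

	/* pda_l carries address bits 6..31, pda_h carries bits 32..63 */
	printf("pda_l=%#llx pda_h=%#llx\n",
	       (unsigned long long)pda_l, (unsigned long long)pda_h);
	return 0;
}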
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
index cf43e798eca4..44083d01852d 100644
--- a/drivers/iommu/intel/perfmon.c
+++ b/drivers/iommu/intel/perfmon.c
@@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
iommu_pmu_set_filter(domain, event->attr.config1,
IOMMU_PMU_FILTER_DOMAIN, idx,
event->attr.config1);
- iommu_pmu_set_filter(pasid, event->attr.config1,
+ iommu_pmu_set_filter(pasid, event->attr.config2,
IOMMU_PMU_FILTER_PASID, idx,
event->attr.config1);
iommu_pmu_set_filter(ats, event->attr.config2,
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index c1bed89b1026..ee3b469e2da1 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -66,7 +66,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
struct page *pages;
int irq, ret;
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index 99d4b075df49..76656fe0470d 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -37,6 +37,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
+ select IOMMUFD_DRIVER
default n
help
This is dangerous, do not enable unless running
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index ee59647c2050..056fec6991bc 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -24,6 +24,8 @@ int no_x2apic_optout;
int disable_irq_post = 0;
+bool enable_posted_msi __ro_after_init;
+
static int disable_irq_remap;
static struct irq_remap_ops *remap_ops;
@@ -70,7 +72,8 @@ static __init int setup_irqremap(char *str)
no_x2apic_optout = 1;
else if (!strncmp(str, "nopost", 6))
disable_irq_post = 1;
-
+ else if (IS_ENABLED(CONFIG_X86_POSTED_MSI) && !strncmp(str, "posted_msi", 10))
+ enable_posted_msi = true;
str += strcspn(str, ",");
while (*str == ',')
str++;
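Aside: with this change posted MSIs are opted into from the kernel command line, e.g. intremap=posted_msi, and the token is only honored when CONFIG_X86_POSTED_MSI is enabled. A minimal restatement of the comma-separated token walk, stripped of the other options and kernel plumbing:

#include <stdbool.h>
#include <string.h>

static bool enable_posted_msi;

static void example_parse_intremap(char *str)
{
	while (str && *str) {
		if (!strncmp(str, "posted_msi", 10))
			enable_posted_msi = true;
		/* ... other tokens elided ... */

		str += strcspn(str, ",");   /* skip to the next separator */
		while (*str == ',')
			str++;
	}
}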
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index b8c47f18bc26..6a2707fe7a78 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1790,6 +1790,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
{}
};
+MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids);
static struct platform_driver mtk_iommu_driver = {
.probe = mtk_iommu_probe,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a9fa2a54dc9b..d6e4002200bd 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -600,6 +600,7 @@ static const struct of_device_id mtk_iommu_v1_of_ids[] = {
{ .compatible = "mediatek,mt2701-m4u", },
{}
};
+MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);
static const struct component_master_ops mtk_iommu_v1_com_ops = {
.bind = mtk_iommu_v1_bind,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 72c07a12f5e1..14464716bacb 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -540,6 +540,31 @@ config RISCV_INTC
depends on RISCV
select IRQ_DOMAIN_HIERARCHY
+config RISCV_APLIC
+ bool
+ depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
+
+config RISCV_APLIC_MSI
+ bool
+ depends on RISCV_APLIC
+ select GENERIC_MSI_IRQ
+ default RISCV_APLIC
+
+config RISCV_IMSIC
+ bool
+ depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_IRQ_MATRIX_ALLOCATOR
+ select GENERIC_MSI_IRQ
+
+config RISCV_IMSIC_PCI
+ bool
+ depends on RISCV_IMSIC
+ depends on PCI
+ depends on PCI_MSI
+ default RISCV_IMSIC
+
config SIFIVE_PLIC
bool
depends on RISCV
@@ -568,7 +593,7 @@ config IRQ_LOONGARCH_CPU
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
- select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select LOONGSON_HTVEC
select LOONGSON_LIOINTC
select LOONGSON_EIOINTC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index ec4a18380998..d9dc3d99aaa8 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -95,6 +95,9 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
+obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o
+obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o
+obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 9c8b1349ee17..a1430ab60a8a 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -165,7 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
return 0;
err_sgi:
- irq_domain_free_irqs_parent(domain, virq, i - 1);
+ irq_domain_free_irqs_parent(domain, virq, i);
alpine_msix_free_sgi(priv, sgi, nr_irqs);
return err;
}
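Aside: the same off-by-one is fixed here and in the Loongson pch-msi driver further below. When the allocation loop fails at index i, exactly i parent IRQs (indices 0 through i - 1) were set up, and irq_domain_free_irqs_parent() takes a count, not a last index, so passing i - 1 leaked one IRQ. Sketch of the loop shape (example_alloc_one() is hypothetical):

static int example_alloc_one(struct irq_domain *domain, unsigned int virq);

static int example_alloc_loop(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	unsigned int i;
	int err;

	for (i = 0; i < nr_irqs; i++) {
		err = example_alloc_one(domain, virq + i);
		if (err) {
			/* i allocations succeeded, so free all i of them */
			irq_domain_free_irqs_parent(domain, virq, i);
			return err;
		}
	}
	return 0;
}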
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index eb02d203c963..90daa274ef23 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -192,14 +192,10 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
unsigned int old_cpu = cpu_for_irq(intc, d);
unsigned int new_cpu;
- struct cpumask valid;
unsigned long flags;
bool enabled;
- if (!cpumask_and(&valid, &intc->cpumask, dest))
- return -EINVAL;
-
- new_cpu = cpumask_any_and(&valid, cpu_online_mask);
+ new_cpu = cpumask_first_and_and(&intc->cpumask, dest, cpu_online_mask);
if (new_cpu >= nr_cpu_ids)
return -EINVAL;
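Aside: cpumask_first_and_and() exists precisely to avoid the on-stack struct cpumask temporary removed above, which can be large when NR_CPUS is big. Its semantics restated as an explicit loop (the real helper is built on bitmap operations, not written like this):

static unsigned int example_first_and_and(const struct cpumask *a,
					  const struct cpumask *b,
					  const struct cpumask *c)
{
	unsigned int cpu;

	for_each_cpu(cpu, a)
		if (cpumask_test_cpu(cpu, b) && cpumask_test_cpu(cpu, c))
			return cpu;

	return nr_cpu_ids;   /* empty intersection */
}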
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 2b0b3175cea0..c988886917f7 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -118,7 +118,7 @@ out:
chained_irq_exit(chip, desc);
}
-static void brcmstb_l2_intc_suspend(struct irq_data *d)
+static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
@@ -127,7 +127,8 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
irq_gc_lock_irqsave(gc, flags);
/* Save the current mask */
- b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
+ if (save)
+ b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
if (b->can_wake) {
/* Program the wakeup mask */
@@ -137,6 +138,16 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
irq_gc_unlock_irqrestore(gc, flags);
}
+static void brcmstb_l2_intc_shutdown(struct irq_data *d)
+{
+ __brcmstb_l2_intc_suspend(d, false);
+}
+
+static void brcmstb_l2_intc_suspend(struct irq_data *d)
+{
+ __brcmstb_l2_intc_suspend(d, true);
+}
+
static void brcmstb_l2_intc_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
@@ -252,7 +263,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
- ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
+ ct->chip.irq_pm_shutdown = brcmstb_l2_intc_shutdown;
if (data->can_wake) {
/* This IRQ chip can wake the system, set all child interrupts
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index fca888b36680..40ebf1726393 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -786,6 +786,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
unsigned long vpt_addr, vconf_addr;
u64 target;
bool alloc;
@@ -798,6 +799,11 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
if (is_v4_1(its)) {
alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
its_encode_alloc(cmd, alloc);
+ /*
+ * Unmapping a VPE is self-synchronizing on GICv4.1,
+ * no need to issue a VSYNC.
+ */
+ vpe = NULL;
}
goto out;
@@ -832,7 +838,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
out:
its_fixup_cmd(cmd);
- return valid_vpe(its, desc->its_vmapp_cmd.vpe);
+ return vpe;
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -3826,9 +3832,9 @@ static int its_vpe_set_affinity(struct irq_data *d,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- struct cpumask common, *table_mask;
+ unsigned int from, cpu = nr_cpu_ids;
+ struct cpumask *table_mask;
unsigned long flags;
- int from, cpu;
/*
* Changing affinity is mega expensive, so let's be as lazy as
@@ -3850,10 +3856,15 @@ static int its_vpe_set_affinity(struct irq_data *d,
* If we are offered another CPU in the same GICv4.1 ITS
* affinity, pick this one. Otherwise, any CPU will do.
*/
- if (table_mask && cpumask_and(&common, mask_val, table_mask))
- cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
- else
+ if (table_mask)
+ cpu = cpumask_any_and(mask_val, table_mask);
+ if (cpu < nr_cpu_ids) {
+ if (cpumask_test_cpu(from, mask_val) &&
+ cpumask_test_cpu(from, table_mask))
+ cpu = from;
+ } else {
cpu = cpumask_first(mask_val);
+ }
if (from == cpu)
goto out;
@@ -4521,8 +4532,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
struct page *vprop_page;
int base, nr_ids, i, err = 0;
- BUG_ON(!vm);
-
bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
if (!bitmap)
return -ENOMEM;
@@ -4561,13 +4570,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
}
- if (err) {
- if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i);
-
- its_lpi_free(bitmap, base, nr_ids);
- its_free_prop_table(vprop_page);
- }
+ if (err)
+ its_vpe_irq_domain_free(domain, virq, i);
return err;
}
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index b64cbe3052e8..c7ddebf312ad 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -59,6 +59,7 @@ static int cpu_to_eio_node(int cpu)
return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}
+#ifdef CONFIG_SMP
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
int i, node, cpu_node, route_node;
@@ -92,19 +93,15 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
unsigned int cpu;
unsigned long flags;
uint32_t vector, regaddr;
- struct cpumask intersect_affinity;
struct eiointc_priv *priv = d->domain->host_data;
raw_spin_lock_irqsave(&affinity_lock, flags);
- cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
- cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);
-
- if (cpumask_empty(&intersect_affinity)) {
+ cpu = cpumask_first_and_and(&priv->cpuspan_map, affinity, cpu_online_mask);
+ if (cpu >= nr_cpu_ids) {
raw_spin_unlock_irqrestore(&affinity_lock, flags);
return -EINVAL;
}
- cpu = cpumask_first(&intersect_affinity);
vector = d->hwirq;
regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
@@ -126,6 +123,7 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af
return IRQ_SET_MASK_OK;
}
+#endif
static int eiointc_index(int node)
{
@@ -238,7 +236,9 @@ static struct irq_chip eiointc_irq_chip = {
.irq_ack = eiointc_ack_irq,
.irq_mask = eiointc_mask_irq,
.irq_unmask = eiointc_unmask_irq,
+#ifdef CONFIG_SMP
.irq_set_affinity = eiointc_set_irq_affinity,
+#endif
};
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 6e1e1f011bb2..dd4d699170f4 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -136,7 +136,7 @@ static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
err_hwirq:
pch_msi_free_hwirq(priv, hwirq, nr_irqs);
- irq_domain_free_irqs_parent(domain, virq, i - 1);
+ irq_domain_free_irqs_parent(domain, virq, i);
return err;
}
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 63db8e2172e0..cbaef65e804c 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -33,6 +33,7 @@
#define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT)
#define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG)
#define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG)
+#define PIC_UNDEF_VECTOR 255
static int nr_pics;
@@ -46,12 +47,19 @@ struct pch_pic {
u32 saved_vec_en[PIC_REG_COUNT];
u32 saved_vec_pol[PIC_REG_COUNT];
u32 saved_vec_edge[PIC_REG_COUNT];
+ u8 table[PIC_COUNT];
+ int inuse;
};
static struct pch_pic *pch_pic_priv[MAX_IO_PICS];
struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
+static inline u8 hwirq_to_bit(struct pch_pic *priv, int hirq)
+{
+ return priv->table[hirq];
+}
+
static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
u32 reg;
@@ -80,45 +88,47 @@ static void pch_pic_mask_irq(struct irq_data *d)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
- pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_MASK, hwirq_to_bit(priv, d->hwirq));
irq_chip_mask_parent(d);
}
static void pch_pic_unmask_irq(struct irq_data *d)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int bit = hwirq_to_bit(priv, d->hwirq);
- writel(BIT(PIC_REG_BIT(d->hwirq)),
- priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+ writel(BIT(PIC_REG_BIT(bit)),
+ priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4);
irq_chip_unmask_parent(d);
- pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_MASK, bit);
}
static int pch_pic_set_type(struct irq_data *d, unsigned int type)
{
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int bit = hwirq_to_bit(priv, d->hwirq);
int ret = 0;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitclr(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
- pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitset(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitset(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
- pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitclr(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
- pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
- pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
+ pch_pic_bitclr(priv, PCH_PIC_EDGE, bit);
+ pch_pic_bitset(priv, PCH_PIC_POL, bit);
irq_set_handler_locked(d, handle_level_irq);
break;
default:
@@ -133,11 +143,12 @@ static void pch_pic_ack_irq(struct irq_data *d)
{
unsigned int reg;
struct pch_pic *priv = irq_data_get_irq_chip_data(d);
+ int bit = hwirq_to_bit(priv, d->hwirq);
- reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4);
- if (reg & BIT(PIC_REG_BIT(d->hwirq))) {
- writel(BIT(PIC_REG_BIT(d->hwirq)),
- priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+ reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(bit) * 4);
+ if (reg & BIT(PIC_REG_BIT(bit))) {
+ writel(BIT(PIC_REG_BIT(bit)),
+ priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4);
}
irq_chip_ack_parent(d);
}
@@ -159,6 +170,8 @@ static int pch_pic_domain_translate(struct irq_domain *d,
{
struct pch_pic *priv = d->host_data;
struct device_node *of_node = to_of_node(fwspec->fwnode);
+ unsigned long flags;
+ int i;
if (of_node) {
if (fwspec->param_count < 2)
@@ -171,12 +184,33 @@ static int pch_pic_domain_translate(struct irq_domain *d,
return -EINVAL;
*hwirq = fwspec->param[0] - priv->gsi_base;
+
if (fwspec->param_count > 1)
*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
else
*type = IRQ_TYPE_NONE;
}
+ raw_spin_lock_irqsave(&priv->pic_lock, flags);
+ /* Check the pic-table to see if the hwirq has already been assigned */
+ for (i = 0; i < priv->inuse; i++) {
+ if (priv->table[i] == *hwirq) {
+ *hwirq = i;
+ break;
+ }
+ }
+ if (i == priv->inuse) {
+ /* Assign a new hwirq in pic-table */
+ if (priv->inuse >= PIC_COUNT) {
+ pr_err("pch-pic domain has no free vectors\n");
+ raw_spin_unlock_irqrestore(&priv->pic_lock, flags);
+ return -EINVAL;
+ }
+ priv->table[priv->inuse] = *hwirq;
+ *hwirq = priv->inuse++;
+ }
+ raw_spin_unlock_irqrestore(&priv->pic_lock, flags);
+
return 0;
}
@@ -194,6 +228,9 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
if (err)
return err;
+ /* Write vector ID */
+ writeb(priv->ht_vec_base + hwirq, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, hwirq)));
+
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 1;
parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
@@ -222,7 +259,7 @@ static void pch_pic_reset(struct pch_pic *priv)
for (i = 0; i < PIC_COUNT; i++) {
/* Write vector ID */
- writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
+ writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i)));
/* Hardcode route to HT0 Lo */
writeb(1, priv->base + PCH_INT_ROUTE(i));
}
@@ -284,6 +321,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
u32 gsi_base)
{
struct pch_pic *priv;
+ int i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -294,6 +332,10 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
if (!priv->base)
goto free_priv;
+ priv->inuse = 0;
+ for (i = 0; i < PIC_COUNT; i++)
+ priv->table[i] = PIC_UNDEF_VECTOR;
+
priv->ht_vec_base = vec_base;
priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1;
priv->gsi_base = gsi_base;
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index be9680645545..d67b5da38982 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -130,7 +130,7 @@ static struct irq_chip asm9260_icoll_chip = {
IRQCHIP_SKIP_SET_WAKE,
};
-asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
{
u32 irqnr;
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index ae67fec2ab46..f6484bf15e0b 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -138,7 +138,7 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
irq_chip_eoi_parent(d);
}
-static void rzg2l_irqc_irq_disable(struct irq_data *d)
+static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
{
unsigned int hw_irq = irqd_to_hwirq(d);
@@ -151,30 +151,24 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
+ if (enable)
+ reg |= TIEN << TSSEL_SHIFT(tssr_offset);
+ else
+ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
+}
+
+static void rzg2l_irqc_irq_disable(struct irq_data *d)
+{
+ rzg2l_tint_irq_endisable(d, false);
irq_chip_disable_parent(d);
}
static void rzg2l_irqc_irq_enable(struct irq_data *d)
{
- unsigned int hw_irq = irqd_to_hwirq(d);
-
- if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
- u32 offset = hw_irq - IRQC_TINT_START;
- u32 tssr_offset = TSSR_OFFSET(offset);
- u8 tssr_index = TSSR_INDEX(offset);
- u32 reg;
-
- raw_spin_lock(&priv->lock);
- reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg |= TIEN << TSSEL_SHIFT(tssr_offset);
- writel_relaxed(reg, priv->base + TSSR(tssr_index));
- raw_spin_unlock(&priv->lock);
- }
+ rzg2l_tint_irq_endisable(d, true);
irq_chip_enable_parent(d);
}
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
new file mode 100644
index 000000000000..4a3ffe856d6c
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/riscv-aplic.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-aplic-main.h"
+
+#define APLIC_DISABLE_IDELIVERY 0
+#define APLIC_ENABLE_IDELIVERY 1
+#define APLIC_DISABLE_ITHRESHOLD 1
+#define APLIC_ENABLE_ITHRESHOLD 0
+
+struct aplic_direct {
+ struct aplic_priv priv;
+ struct irq_domain *irqdomain;
+ struct cpumask lmask;
+};
+
+struct aplic_idc {
+ unsigned int hart_index;
+ void __iomem *regs;
+ struct aplic_direct *direct;
+};
+
+static unsigned int aplic_direct_parent_irq;
+static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs);
+
+static void aplic_direct_irq_eoi(struct irq_data *d)
+{
+ /*
+ * The fasteoi_handler requires an irq_eoi() callback, hence
+ * this dummy handler.
+ */
+}
+
+#ifdef CONFIG_SMP
+static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
+ struct aplic_idc *idc;
+ unsigned int cpu, val;
+ void __iomem *target;
+
+ if (force)
+ cpu = cpumask_first_and(&direct->lmask, mask_val);
+ else
+ cpu = cpumask_first_and_and(&direct->lmask, mask_val, cpu_online_mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ idc = per_cpu_ptr(&aplic_idcs, cpu);
+ target = priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32);
+ val = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
+ val |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
+ writel(val, target);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static struct irq_chip aplic_direct_chip = {
+ .name = "APLIC-DIRECT",
+ .irq_mask = aplic_irq_mask,
+ .irq_unmask = aplic_irq_unmask,
+ .irq_set_type = aplic_irq_set_type,
+ .irq_eoi = aplic_direct_irq_eoi,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = aplic_direct_set_affinity,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct aplic_priv *priv = d->host_data;
+
+ return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
+}
+
+static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct aplic_priv *priv = domain->host_data;
+ struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int i, ret;
+
+ ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip,
+ priv, handle_fasteoi_irq, NULL, NULL);
+ irq_set_affinity(virq + i, &direct->lmask);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops aplic_direct_irqdomain_ops = {
+ .translate = aplic_direct_irqdomain_translate,
+ .alloc = aplic_direct_irqdomain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
+/*
+ * To handle an APLIC direct interrupt, we just read the CLAIMI register,
+ * which returns the highest-priority pending interrupt and clears its
+ * pending bit. This process is repeated until the CLAIMI register
+ * returns zero.
+ */
+static void aplic_direct_handle_irq(struct irq_desc *desc)
+{
+ struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs);
+ struct irq_domain *irqdomain = idc->direct->irqdomain;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ irq_hw_number_t hw_irq;
+ int irq;
+
+ chained_irq_enter(chip, desc);
+
+ while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) {
+ hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT;
+ irq = irq_find_mapping(irqdomain, hw_irq);
+
+ if (unlikely(irq <= 0)) {
+ dev_warn_ratelimited(idc->direct->priv.dev,
+ "hw_irq %lu mapping not found\n", hw_irq);
+ } else {
+ generic_handle_irq(irq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en)
+{
+ u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY;
+ u32 th = (en) ? APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD;
+
+ /* Priority must be less than threshold for interrupt triggering */
+ writel(th, idc->regs + APLIC_IDC_ITHRESHOLD);
+
+ /* Delivery must be set to 1 for interrupt triggering */
+ writel(de, idc->regs + APLIC_IDC_IDELIVERY);
+}
+
+static int aplic_direct_dying_cpu(unsigned int cpu)
+{
+ if (aplic_direct_parent_irq)
+ disable_percpu_irq(aplic_direct_parent_irq);
+
+ return 0;
+}
+
+static int aplic_direct_starting_cpu(unsigned int cpu)
+{
+ if (aplic_direct_parent_irq) {
+ enable_percpu_irq(aplic_direct_parent_irq,
+ irq_get_trigger_type(aplic_direct_parent_irq));
+ }
+
+ return 0;
+}
+
+static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
+ u32 *parent_hwirq, unsigned long *parent_hartid)
+{
+ struct of_phandle_args parent;
+ int rc;
+
+ /*
+ * Currently, only OF fwnode is supported, so extend this
+ * function when adding ACPI support.
+ */
+ if (!is_of_node(dev->fwnode))
+ return -EINVAL;
+
+ rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent);
+ if (rc)
+ return rc;
+
+ rc = riscv_of_parent_hartid(parent.np, parent_hartid);
+ if (rc)
+ return rc;
+
+ *parent_hwirq = parent.args[0];
+ return 0;
+}
+
+int aplic_direct_setup(struct device *dev, void __iomem *regs)
+{
+ int i, j, rc, cpu, current_cpu, setup_count = 0;
+ struct aplic_direct *direct;
+ struct irq_domain *domain;
+ struct aplic_priv *priv;
+ struct aplic_idc *idc;
+ unsigned long hartid;
+ u32 v, hwirq;
+
+ direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL);
+ if (!direct)
+ return -ENOMEM;
+ priv = &direct->priv;
+
+ rc = aplic_setup_priv(priv, dev, regs);
+ if (rc) {
+ dev_err(dev, "failed to create APLIC context\n");
+ return rc;
+ }
+
+ /* Setup per-CPU IDC and target CPU mask */
+ current_cpu = get_cpu();
+ for (i = 0; i < priv->nr_idcs; i++) {
+ rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid);
+ if (rc) {
+ dev_warn(dev, "parent irq for IDC%d not found\n", i);
+ continue;
+ }
+
+ /*
+ * Skip interrupts other than external interrupts for
+ * the current privilege level.
+ */
+ if (hwirq != RV_IRQ_EXT)
+ continue;
+
+ cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ dev_warn(dev, "invalid cpuid for IDC%d\n", i);
+ continue;
+ }
+
+ cpumask_set_cpu(cpu, &direct->lmask);
+
+ idc = per_cpu_ptr(&aplic_idcs, cpu);
+ idc->hart_index = i;
+ idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
+ idc->direct = direct;
+
+ aplic_idc_set_delivery(idc, true);
+
+ /*
+ * The boot CPU might not have APLIC hart_index 0, so check
+ * and update the target registers of all interrupts.
+ */
+ if (cpu == current_cpu && idc->hart_index) {
+ v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
+ v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
+ for (j = 1; j <= priv->nr_irqs; j++)
+ writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32));
+ }
+
+ setup_count++;
+ }
+ put_cpu();
+
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
+ DOMAIN_BUS_ANY);
+ if (!aplic_direct_parent_irq && domain) {
+ aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (aplic_direct_parent_irq) {
+ irq_set_chained_handler(aplic_direct_parent_irq,
+ aplic_direct_handle_irq);
+
+ /*
+ * Setup CPUHP notifier to enable parent
+ * interrupt on all CPUs
+ */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "irqchip/riscv/aplic:starting",
+ aplic_direct_starting_cpu,
+ aplic_direct_dying_cpu);
+ }
+ }
+
+ /* Fail if we were not able to set up IDC for any CPU */
+ if (!setup_count)
+ return -ENODEV;
+
+ /* Setup global config and interrupt delivery */
+ aplic_init_hw_global(priv, false);
+
+ /* Create irq domain instance for the APLIC */
+ direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1,
+ &aplic_direct_irqdomain_ops, priv);
+ if (!direct->irqdomain) {
+ dev_err(dev, "failed to create direct irq domain\n");
+ return -ENOMEM;
+ }
+
+ /* Advertise the interrupt controller */
+ dev_info(dev, "%d interrupts directly connected to %d CPUs\n",
+ priv->nr_irqs, priv->nr_idcs);
+
+ return 0;
+}
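Aside: in direct mode every wired source is steered by one 32-bit TARGET register. A sketch of the encoding used throughout the new driver; the function itself is illustrative, and note that APLIC source IDs start at 1, which is why every register offset uses (hwirq - 1):

static void example_steer_source(struct aplic_priv *priv, u32 hwirq,
				 u32 hart_index)
{
	u32 val;

	val = FIELD_PREP(APLIC_TARGET_HART_IDX, hart_index);
	val |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);

	/* source IDs are 1-based, registers are 0-based */
	writel(val, priv->regs + APLIC_TARGET_BASE + (hwirq - 1) * sizeof(u32));
}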
diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c
new file mode 100644
index 000000000000..774a0c97fdab
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-main.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/irqchip/riscv-aplic.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#include "irq-riscv-aplic-main.h"
+
+void aplic_irq_unmask(struct irq_data *d)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(d->hwirq, priv->regs + APLIC_SETIENUM);
+}
+
+void aplic_irq_mask(struct irq_data *d)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ writel(d->hwirq, priv->regs + APLIC_CLRIENUM);
+}
+
+int aplic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ void __iomem *sourcecfg;
+ u32 val = 0;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ val = APLIC_SOURCECFG_SM_INACTIVE;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val = APLIC_SOURCECFG_SM_LEVEL_LOW;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = APLIC_SOURCECFG_SM_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = APLIC_SOURCECFG_SM_EDGE_FALL;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = APLIC_SOURCECFG_SM_EDGE_RISE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sourcecfg = priv->regs + APLIC_SOURCECFG_BASE;
+ sourcecfg += (d->hwirq - 1) * sizeof(u32);
+ writel(val, sourcecfg);
+
+ return 0;
+}
+
+int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base,
+ unsigned long *hwirq, unsigned int *type)
+{
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+ if (WARN_ON(!fwspec->param[0]))
+ return -EINVAL;
+
+ /* For DT, gsi_base is always zero. */
+ *hwirq = fwspec->param[0] - gsi_base;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ WARN_ON(*type == IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode)
+{
+ u32 val;
+#ifdef CONFIG_RISCV_M_MODE
+ u32 valh;
+
+ if (msi_mode) {
+ val = lower_32_bits(priv->msicfg.base_ppn);
+ valh = FIELD_PREP(APLIC_xMSICFGADDRH_BAPPN, upper_32_bits(priv->msicfg.base_ppn));
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXW, priv->msicfg.lhxw);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXW, priv->msicfg.hhxw);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXS, priv->msicfg.lhxs);
+ valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXS, priv->msicfg.hhxs);
+ writel(val, priv->regs + APLIC_xMSICFGADDR);
+ writel(valh, priv->regs + APLIC_xMSICFGADDRH);
+ }
+#endif
+
+ /* Setup APLIC domaincfg register */
+ val = readl(priv->regs + APLIC_DOMAINCFG);
+ val |= APLIC_DOMAINCFG_IE;
+ if (msi_mode)
+ val |= APLIC_DOMAINCFG_DM;
+ writel(val, priv->regs + APLIC_DOMAINCFG);
+ if (readl(priv->regs + APLIC_DOMAINCFG) != val)
+ dev_warn(priv->dev, "unable to write 0x%x in domaincfg\n", val);
+}
+
+static void aplic_init_hw_irqs(struct aplic_priv *priv)
+{
+ int i;
+
+ /* Disable all interrupts */
+ for (i = 0; i <= priv->nr_irqs; i += 32)
+ writel(-1U, priv->regs + APLIC_CLRIE_BASE + (i / 32) * sizeof(u32));
+
+ /* Set interrupt type and default priority for all interrupts */
+ for (i = 1; i <= priv->nr_irqs; i++) {
+ writel(0, priv->regs + APLIC_SOURCECFG_BASE + (i - 1) * sizeof(u32));
+ writel(APLIC_DEFAULT_PRIORITY,
+ priv->regs + APLIC_TARGET_BASE + (i - 1) * sizeof(u32));
+ }
+
+ /* Clear APLIC domaincfg */
+ writel(0, priv->regs + APLIC_DOMAINCFG);
+}
+
+int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs)
+{
+ struct of_phandle_args parent;
+ int rc;
+
+ /*
+ * Currently, only OF fwnode is supported, so extend this
+ * function when adding ACPI support.
+ */
+ if (!is_of_node(dev->fwnode))
+ return -EINVAL;
+
+ /* Save device pointer and register base */
+ priv->dev = dev;
+ priv->regs = regs;
+
+ /* Find out number of interrupt sources */
+ rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,num-sources",
+ &priv->nr_irqs);
+ if (rc) {
+ dev_err(dev, "failed to get number of interrupt sources\n");
+ return rc;
+ }
+
+ /*
+ * Find out the number of IDCs based on parent interrupts.
+ *
+ * If the "msi-parent" property is present then we ignore the
+ * APLIC IDCs, which forces the APLIC driver to use MSI mode.
+ */
+ if (!of_property_present(to_of_node(dev->fwnode), "msi-parent")) {
+ while (!of_irq_parse_one(to_of_node(dev->fwnode), priv->nr_idcs, &parent))
+ priv->nr_idcs++;
+ }
+
+ /* Set up the initial state of APLIC interrupts */
+ aplic_init_hw_irqs(priv);
+
+ return 0;
+}
+
+static int aplic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ bool msi_mode = false;
+ void __iomem *regs;
+ int rc;
+
+ /* Map the MMIO registers */
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "failed to map MMIO registers\n");
+ return PTR_ERR(regs);
+ }
+
+ /*
+ * If the msi-parent property is present then set up APLIC MSI
+ * mode, otherwise set up APLIC direct mode.
+ */
+ if (is_of_node(dev->fwnode))
+ msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent");
+ if (msi_mode)
+ rc = aplic_msi_setup(dev, regs);
+ else
+ rc = aplic_direct_setup(dev, regs);
+ if (rc)
+ dev_err(dev, "failed to setup APLIC in %s mode\n", msi_mode ? "MSI" : "direct");
+
+ return rc;
+}
+
+static const struct of_device_id aplic_match[] = {
+ { .compatible = "riscv,aplic" },
+ {}
+};
+
+static struct platform_driver aplic_driver = {
+ .driver = {
+ .name = "riscv-aplic",
+ .of_match_table = aplic_match,
+ },
+ .probe = aplic_probe,
+};
+builtin_platform_driver(aplic_driver);
diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h
new file mode 100644
index 000000000000..4393927d8c80
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-main.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _IRQ_RISCV_APLIC_MAIN_H
+#define _IRQ_RISCV_APLIC_MAIN_H
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/fwnode.h>
+
+#define APLIC_DEFAULT_PRIORITY 1
+
+struct aplic_msicfg {
+ phys_addr_t base_ppn;
+ u32 hhxs;
+ u32 hhxw;
+ u32 lhxs;
+ u32 lhxw;
+};
+
+struct aplic_priv {
+ struct device *dev;
+ u32 gsi_base;
+ u32 nr_irqs;
+ u32 nr_idcs;
+ void __iomem *regs;
+ struct aplic_msicfg msicfg;
+};
+
+void aplic_irq_unmask(struct irq_data *d);
+void aplic_irq_mask(struct irq_data *d);
+int aplic_irq_set_type(struct irq_data *d, unsigned int type);
+int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base,
+ unsigned long *hwirq, unsigned int *type);
+void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode);
+int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs);
+int aplic_direct_setup(struct device *dev, void __iomem *regs);
+#ifdef CONFIG_RISCV_APLIC_MSI
+int aplic_msi_setup(struct device *dev, void __iomem *regs);
+#else
+static inline int aplic_msi_setup(struct device *dev, void __iomem *regs)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c
new file mode 100644
index 000000000000..028444af48bd
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-msi.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/riscv-aplic.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-aplic-main.h"
+
+static void aplic_msi_irq_mask(struct irq_data *d)
+{
+ aplic_irq_mask(d);
+ irq_chip_mask_parent(d);
+}
+
+static void aplic_msi_irq_unmask(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ aplic_irq_unmask(d);
+}
+
+static void aplic_msi_irq_eoi(struct irq_data *d)
+{
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+
+ /*
+ * EOI handling is required only for level-triggered interrupts
+ * when APLIC is in MSI mode.
+ */
+
+ switch (irqd_get_trigger_type(d)) {
+ case IRQ_TYPE_LEVEL_LOW:
+ case IRQ_TYPE_LEVEL_HIGH:
+ /*
+ * The section "4.9.2 Special consideration for level-sensitive interrupt
+ * sources" of the RISC-V AIA specification says:
+ *
+ * A second option is for the interrupt service routine to write the
+ * APLIC’s source identity number for the interrupt to the domain’s
+ * setipnum register just before exiting. This will cause the interrupt’s
+ * pending bit to be set to one again if the source is still asserting
+ * an interrupt, but not if the source is not asserting an interrupt.
+ */
+ writel(d->hwirq, priv->regs + APLIC_SETIPNUM_LE);
+ break;
+ }
+}
+
+static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ unsigned int group_index, hart_index, guest_index, val;
+ struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
+ struct aplic_msicfg *mc = &priv->msicfg;
+ phys_addr_t tppn, tbppn, msg_addr;
+ void __iomem *target;
+
+ /* For zeroed MSI, simply write zero into the target register */
+ if (!msg->address_hi && !msg->address_lo && !msg->data) {
+ target = priv->regs + APLIC_TARGET_BASE;
+ target += (d->hwirq - 1) * sizeof(u32);
+ writel(0, target);
+ return;
+ }
+
+ /* Sanity check on message data */
+ WARN_ON(msg->data > APLIC_TARGET_EIID_MASK);
+
+ /* Compute target MSI address */
+ msg_addr = (((u64)msg->address_hi) << 32) | msg->address_lo;
+ tppn = msg_addr >> APLIC_xMSICFGADDR_PPN_SHIFT;
+
+ /* Compute target HART Base PPN */
+ tbppn = tppn;
+ tbppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ tbppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs);
+ tbppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs);
+ WARN_ON(tbppn != mc->base_ppn);
+
+ /* Compute target group and hart indexes */
+ group_index = (tppn >> APLIC_xMSICFGADDR_PPN_HHX_SHIFT(mc->hhxs)) &
+ APLIC_xMSICFGADDR_PPN_HHX_MASK(mc->hhxw);
+ hart_index = (tppn >> APLIC_xMSICFGADDR_PPN_LHX_SHIFT(mc->lhxs)) &
+ APLIC_xMSICFGADDR_PPN_LHX_MASK(mc->lhxw);
+ hart_index |= (group_index << mc->lhxw);
+ WARN_ON(hart_index > APLIC_TARGET_HART_IDX_MASK);
+
+ /* Compute target guest index */
+ guest_index = tppn & APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ WARN_ON(guest_index > APLIC_TARGET_GUEST_IDX_MASK);
+
+ /* Update IRQ TARGET register */
+ target = priv->regs + APLIC_TARGET_BASE;
+ target += (d->hwirq - 1) * sizeof(u32);
+ val = FIELD_PREP(APLIC_TARGET_HART_IDX, hart_index);
+ val |= FIELD_PREP(APLIC_TARGET_GUEST_IDX, guest_index);
+ val |= FIELD_PREP(APLIC_TARGET_EIID, msg->data);
+ writel(val, target);
+}
+
+static void aplic_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = (u32)desc->data.icookie.value;
+}
+
+static int aplic_msi_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct msi_domain_info *info = d->host_data;
+ struct aplic_priv *priv = info->data;
+
+ return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
+}
+
+static const struct msi_domain_template aplic_msi_template = {
+ .chip = {
+ .name = "APLIC-MSI",
+ .irq_mask = aplic_msi_irq_mask,
+ .irq_unmask = aplic_msi_irq_unmask,
+ .irq_set_type = aplic_irq_set_type,
+ .irq_eoi = aplic_msi_irq_eoi,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_write_msi_msg = aplic_msi_write_msg,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+ },
+
+ .ops = {
+ .set_desc = aplic_msi_set_desc,
+ .msi_translate = aplic_msi_translate,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_USE_DEV_FWNODE,
+ .handler = handle_fasteoi_irq,
+ .handler_name = "fasteoi",
+ },
+};
+
+int aplic_msi_setup(struct device *dev, void __iomem *regs)
+{
+ const struct imsic_global_config *imsic_global;
+ struct aplic_priv *priv;
+ struct aplic_msicfg *mc;
+ phys_addr_t pa;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ rc = aplic_setup_priv(priv, dev, regs);
+ if (rc) {
+ dev_err(dev, "failed to create APLIC context\n");
+ return rc;
+ }
+ mc = &priv->msicfg;
+
+ /*
+ * The APLIC outgoing MSI config registers assume the target MSI
+ * controller to be a RISC-V AIA IMSIC controller.
+ */
+ imsic_global = imsic_get_global_config();
+ if (!imsic_global) {
+ dev_err(dev, "IMSIC global config not found\n");
+ return -ENODEV;
+ }
+
+ /* Find number of guest index bits (LHXS) */
+ mc->lhxs = imsic_global->guest_index_bits;
+ if (APLIC_xMSICFGADDRH_LHXS_MASK < mc->lhxs) {
+ dev_err(dev, "IMSIC guest index bits big for APLIC LHXS\n");
+ return -EINVAL;
+ }
+
+ /* Find number of HART index bits (LHXW) */
+ mc->lhxw = imsic_global->hart_index_bits;
+ if (APLIC_xMSICFGADDRH_LHXW_MASK < mc->lhxw) {
+ dev_err(dev, "IMSIC hart index bits big for APLIC LHXW\n");
+ return -EINVAL;
+ }
+
+ /* Find number of group index bits (HHXW) */
+ mc->hhxw = imsic_global->group_index_bits;
+ if (APLIC_xMSICFGADDRH_HHXW_MASK < mc->hhxw) {
+ dev_err(dev, "IMSIC group index bits big for APLIC HHXW\n");
+ return -EINVAL;
+ }
+
+ /* Find first bit position of group index (HHXS) */
+ mc->hhxs = imsic_global->group_index_shift;
+ if (mc->hhxs < (2 * APLIC_xMSICFGADDR_PPN_SHIFT)) {
+ dev_err(dev, "IMSIC group index shift should be >= %d\n",
+ (2 * APLIC_xMSICFGADDR_PPN_SHIFT));
+ return -EINVAL;
+ }
+ mc->hhxs -= (2 * APLIC_xMSICFGADDR_PPN_SHIFT);
+ if (APLIC_xMSICFGADDRH_HHXS_MASK < mc->hhxs) {
+ dev_err(dev, "IMSIC group index shift big for APLIC HHXS\n");
+ return -EINVAL;
+ }
+
+ /* Compute PPN base */
+ mc->base_ppn = imsic_global->base_addr >> APLIC_xMSICFGADDR_PPN_SHIFT;
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HART(mc->lhxs);
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_LHX(mc->lhxw, mc->lhxs);
+ mc->base_ppn &= ~APLIC_xMSICFGADDR_PPN_HHX(mc->hhxw, mc->hhxs);
+
+ /* Setup global config and interrupt delivery */
+ aplic_init_hw_global(priv, true);
+
+ /* Set the APLIC device MSI domain if not available */
+ if (!dev_get_msi_domain(dev)) {
+ /*
+ * The device MSI domain for an OF device is only set at the
+ * time the OF device is populated/created. If the device MSI
+ * domain is discovered later, after the OF device is created,
+ * then we need to set it explicitly before using any platform
+ * MSI functions.
+ *
+ * In the case of an APLIC device, the parent MSI domain is always
+ * IMSIC, and the IMSIC MSI domains are created later through
+ * platform driver probing, so we set it explicitly here.
+ */
+ if (is_of_node(dev->fwnode))
+ of_msi_configure(dev, to_of_node(dev->fwnode));
+ }
+
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template,
+ priv->nr_irqs + 1, priv, priv)) {
+ dev_err(dev, "failed to create MSI irq domain\n");
+ return -ENOMEM;
+ }
+
+ /* Advertise the interrupt controller */
+ pa = priv->msicfg.base_ppn << APLIC_xMSICFGADDR_PPN_SHIFT;
+ dev_info(dev, "%d interrupts forwarded to MSI base %pa\n", priv->nr_irqs, &pa);
+
+ return 0;
+}
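Aside: a standalone worked example of the address decomposition in aplic_msi_write_msg(), under an assumed simple IMSIC geometry (no guest files, two hart-index bits, a single group, 4 KiB per interrupt file). The base address is made up; the point is that the hart index is recovered purely from the page number of the MSI target address:

#include <stdint.h>
#include <stdio.h>

#define PPN_SHIFT 12   /* 4 KiB IMSIC interrupt files */
#define LHXS 0         /* no guest index bits */
#define LHXW 2         /* up to 4 harts per group */

int main(void)
{
	uint64_t base = 0x28000000ULL;                   /* assumed IMSIC base */
	uint64_t msg_addr = base + (3ULL << PPN_SHIFT);  /* hart 3's file */
	uint64_t tppn = msg_addr >> PPN_SHIFT;
	uint32_t hart_index = (tppn >> LHXS) & ((1U << LHXW) - 1);

	printf("hart_index = %u\n", hart_index);         /* prints 3 */
	return 0;
}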
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
new file mode 100644
index 000000000000..886418ec06cb
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-imsic-state.h"
+
+static int imsic_parent_irq;
+
+#ifdef CONFIG_SMP
+static void imsic_ipi_send(unsigned int cpu)
+{
+ struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);
+
+ writel_relaxed(IMSIC_IPI_ID, local->msi_va);
+}
+
+static void imsic_ipi_starting_cpu(void)
+{
+ /* Enable IPIs for current CPU. */
+ __imsic_id_set_enable(IMSIC_IPI_ID);
+}
+
+static void imsic_ipi_dying_cpu(void)
+{
+ /* Disable IPIs for current CPU. */
+ __imsic_id_clear_enable(IMSIC_IPI_ID);
+}
+
+static int __init imsic_ipi_domain_init(void)
+{
+ int virq;
+
+ /* Create IMSIC IPI multiplexing */
+ virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send);
+ if (virq <= 0)
+ return virq < 0 ? virq : -ENOMEM;
+
+ /* Set vIRQ range */
+ riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI, true);
+
+ /* Announce that IMSIC is providing IPIs */
+ pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID);
+
+ return 0;
+}
+#else
+static void imsic_ipi_starting_cpu(void) { }
+static void imsic_ipi_dying_cpu(void) { }
+static int __init imsic_ipi_domain_init(void) { return 0; }
+#endif
+
+/*
+ * To handle an interrupt, we read the TOPEI CSR and write zero in one
+ * instruction. If the TOPEI CSR is non-zero then we translate TOPEI.ID to
+ * a Linux interrupt number and let the Linux IRQ subsystem handle it.
+ */
+static void imsic_handle_irq(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int err, cpu = smp_processor_id();
+ struct imsic_vector *vec;
+ unsigned long local_id;
+
+ chained_irq_enter(chip, desc);
+
+ while ((local_id = csr_swap(CSR_TOPEI, 0))) {
+ local_id >>= TOPEI_ID_SHIFT;
+
+ if (local_id == IMSIC_IPI_ID) {
+ if (IS_ENABLED(CONFIG_SMP))
+ ipi_mux_process();
+ continue;
+ }
+
+ if (unlikely(!imsic->base_domain))
+ continue;
+
+ vec = imsic_vector_from_local_id(cpu, local_id);
+ if (!vec) {
+ pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id);
+ continue;
+ }
+
+ err = generic_handle_domain_irq(imsic->base_domain, vec->hwirq);
+ if (unlikely(err))
+ pr_warn_ratelimited("hwirq 0x%x mapping not found\n", vec->hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
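
The claim loop above depends on csr_swap() reading and clearing CSR_TOPEI in a single instruction, so no pending identity can be lost between the read and the clear. A minimal userspace model of the loop, with the CSR replaced by a plain variable (the register contents here are illustrative assumptions; the shift follows the AIA TOPEI layout):

#include <stdio.h>

#define TOPEI_ID_SHIFT	16	/* identity lives in bits [26:16] */

static unsigned long fake_topei = 42UL << TOPEI_ID_SHIFT;

/* Read-and-clear in one step, like csr_swap(CSR_TOPEI, 0) */
static unsigned long claim_topei(void)
{
	unsigned long old = fake_topei;

	fake_topei = 0;
	return old;
}

int main(void)
{
	unsigned long val;

	while ((val = claim_topei()))
		printf("claimed ID %lu\n", val >> TOPEI_ID_SHIFT);
	return 0;	/* prints "claimed ID 42" once */
}
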
+
+static int imsic_starting_cpu(unsigned int cpu)
+{
+ /* Mark per-CPU IMSIC state as online */
+ imsic_state_online();
+
+ /* Enable per-CPU parent interrupt */
+ enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq));
+
+ /* Setup IPIs */
+ imsic_ipi_starting_cpu();
+
+ /*
+ * Interrupt identities might have been enabled/disabled while
+ * this CPU was not running, so sync up the local enable/disable state.
+ */
+ imsic_local_sync_all();
+
+ /* Enable local interrupt delivery */
+ imsic_local_delivery(true);
+
+ return 0;
+}
+
+static int imsic_dying_cpu(unsigned int cpu)
+{
+ /* Cleanup IPIs */
+ imsic_ipi_dying_cpu();
+
+ /* Mark per-CPU IMSIC state as offline */
+ imsic_state_offline();
+
+ return 0;
+}
+
+static int __init imsic_early_probe(struct fwnode_handle *fwnode)
+{
+ struct irq_domain *domain;
+ int rc;
+
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (!domain) {
+ pr_err("%pfwP: Failed to find INTC domain\n", fwnode);
+ return -ENOENT;
+ }
+ imsic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (!imsic_parent_irq) {
+ pr_err("%pfwP: Failed to create INTC mapping\n", fwnode);
+ return -ENOENT;
+ }
+
+ /* Initialize IPI domain */
+ rc = imsic_ipi_domain_init();
+ if (rc) {
+ pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode);
+ return rc;
+ }
+
+ /* Setup chained handler to the parent domain interrupt */
+ irq_set_chained_handler(imsic_parent_irq, imsic_handle_irq);
+
+ /*
+ * Setup cpuhp state (must be done after setting imsic_parent_irq)
+ *
+ * Don't disable the per-CPU IMSIC file when a CPU goes offline,
+ * because this would affect IPIs; the masking/unmasking of
+ * virtual IPIs is done via the generic IPI-Mux.
+ */
+ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting",
+ imsic_starting_cpu, imsic_dying_cpu);
+
+ return 0;
+}
+
+static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent)
+{
+ struct fwnode_handle *fwnode = &node->fwnode;
+ int rc;
+
+ /* Setup IMSIC state */
+ rc = imsic_setup_state(fwnode);
+ if (rc) {
+ pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc);
+ return rc;
+ }
+
+ /* Do early setup of IPIs */
+ rc = imsic_early_probe(fwnode);
+ if (rc)
+ return rc;
+
+ /* Ensure that OF platform device gets probed */
+ of_node_clear_flag(node, OF_POPULATED);
+ return 0;
+}
+
+IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init);
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
new file mode 100644
index 000000000000..11723a763c10
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/bitmap.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+
+#include "irq-riscv-imsic-state.h"
+
+static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
+ phys_addr_t *out_msi_pa)
+{
+ struct imsic_global_config *global;
+ struct imsic_local_config *local;
+
+ global = &imsic->global;
+ local = per_cpu_ptr(global->local, cpu);
+
+ if (BIT(global->guest_index_bits) <= guest_index)
+ return false;
+
+ if (out_msi_pa)
+ *out_msi_pa = local->msi_pa + (guest_index * IMSIC_MMIO_PAGE_SZ);
+
+ return true;
+}
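
The guest_index arithmetic above implies a simple MSI page layout: each hart owns a block of 2^guest_index_bits contiguous MMIO pages, with guest files following the hart's own file. A standalone sketch with made-up numbers (the base address, index width, and hart number are assumptions, not taken from any real platform):

#include <stdio.h>

#define IMSIC_MMIO_PAGE_SHIFT	12
#define IMSIC_MMIO_PAGE_SZ	(1UL << IMSIC_MMIO_PAGE_SHIFT)

int main(void)
{
	unsigned long base = 0x28000000;	/* made-up base address */
	unsigned int guest_index_bits = 1, hart = 2, guest_index = 0;

	/* Harts are spaced 2^guest_index_bits pages apart */
	unsigned long msi_pa = base +
		(((hart << guest_index_bits) + guest_index) * IMSIC_MMIO_PAGE_SZ);

	printf("hart %u guest %u -> MSI page 0x%lx\n", hart, guest_index, msi_pa);
	return 0;	/* 0x28000000 + 4 * 0x1000 = 0x28004000 */
}
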
+
+static void imsic_irq_mask(struct irq_data *d)
+{
+ imsic_vector_mask(irq_data_get_irq_chip_data(d));
+}
+
+static void imsic_irq_unmask(struct irq_data *d)
+{
+ imsic_vector_unmask(irq_data_get_irq_chip_data(d));
+}
+
+static int imsic_irq_retrigger(struct irq_data *d)
+{
+ struct imsic_vector *vec = irq_data_get_irq_chip_data(d);
+ struct imsic_local_config *local;
+
+ if (WARN_ON(!vec))
+ return -ENOENT;
+
+ local = per_cpu_ptr(imsic->global.local, vec->cpu);
+ writel_relaxed(vec->local_id, local->msi_va);
+ return 0;
+}
+
+static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
+{
+ phys_addr_t msi_addr;
+
+ if (WARN_ON(!vec))
+ return;
+
+ if (WARN_ON(!imsic_cpu_page_phys(vec->cpu, 0, &msi_addr)))
+ return;
+
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->data = vec->local_id;
+}
+
+static void imsic_irq_compose_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ imsic_irq_compose_vector_msg(irq_data_get_irq_chip_data(d), msg);
+}
+
+#ifdef CONFIG_SMP
+static void imsic_msi_update_msg(struct irq_data *d, struct imsic_vector *vec)
+{
+ struct msi_msg msg = { };
+
+ imsic_irq_compose_vector_msg(vec, &msg);
+ irq_data_get_irq_chip(d)->irq_write_msi_msg(d, &msg);
+}
+
+static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ struct imsic_vector *old_vec, *new_vec;
+ struct irq_data *pd = d->parent_data;
+
+ old_vec = irq_data_get_irq_chip_data(pd);
+ if (WARN_ON(!old_vec))
+ return -ENOENT;
+
+ /* If old vector cpu belongs to the target cpumask then do nothing */
+ if (cpumask_test_cpu(old_vec->cpu, mask_val))
+ return IRQ_SET_MASK_OK_DONE;
+
+ /* If move is already in-flight then return failure */
+ if (imsic_vector_get_move(old_vec))
+ return -EBUSY;
+
+ /* Get a new vector on the desired set of CPUs */
+ new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
+ if (!new_vec)
+ return -ENOSPC;
+
+ /* Point device to the new vector */
+ imsic_msi_update_msg(d, new_vec);
+
+ /* Update irq descriptors with the new vector */
+ pd->chip_data = new_vec;
+
+ /* Update effective affinity of parent irq data */
+ irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));
+
+ /* Move state of the old vector to the new vector */
+ imsic_vector_move(old_vec, new_vec);
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
+static struct irq_chip imsic_irq_base_chip = {
+ .name = "IMSIC",
+ .irq_mask = imsic_irq_mask,
+ .irq_unmask = imsic_irq_unmask,
+ .irq_retrigger = imsic_irq_retrigger,
+ .irq_compose_msi_msg = imsic_irq_compose_msg,
+ .flags = IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct imsic_vector *vec;
+
+ /* Multi-MSI is not supported yet. */
+ if (nr_irqs > 1)
+ return -EOPNOTSUPP;
+
+ vec = imsic_vector_alloc(virq, cpu_online_mask);
+ if (!vec)
+ return -ENOSPC;
+
+ irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
+ handle_simple_irq, NULL, NULL);
+ irq_set_noprobe(virq);
+ irq_set_affinity(virq, cpu_online_mask);
+ irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
+
+ return 0;
+}
+
+static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ imsic_vector_free(irq_data_get_irq_chip_data(d));
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ const struct msi_parent_ops *ops = domain->msi_parent_ops;
+ u32 busmask = BIT(bus_token);
+
+ if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
+ return 0;
+
+ /* Handle pure domain searches */
+ if (bus_token == ops->bus_select_token)
+ return 1;
+
+ return !!(ops->bus_select_mask & busmask);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind)
+{
+ if (!irqd) {
+ imsic_vector_debug_show_summary(m, ind);
+ return;
+ }
+
+ imsic_vector_debug_show(m, irq_data_get_irq_chip_data(irqd), ind);
+}
+#endif
+
+static const struct irq_domain_ops imsic_base_domain_ops = {
+ .alloc = imsic_irq_domain_alloc,
+ .free = imsic_irq_domain_free,
+ .select = imsic_irq_domain_select,
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ .debug_show = imsic_irq_debug_show,
+#endif
+};
+
+#ifdef CONFIG_RISCV_IMSIC_PCI
+
+static void imsic_pci_mask_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void imsic_pci_unmask_irq(struct irq_data *d)
+{
+ irq_chip_unmask_parent(d);
+ pci_msi_unmask_irq(d);
+}
+
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+
+#else
+
+#define MATCH_PCI_MSI 0
+
+#endif
+
+static bool imsic_init_dev_msi_info(struct device *dev,
+ struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info)
+{
+ const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+
+ /* MSI parent domain specific settings */
+ switch (real_parent->bus_token) {
+ case DOMAIN_BUS_NEXUS:
+ if (WARN_ON_ONCE(domain != real_parent))
+ return false;
+#ifdef CONFIG_SMP
+ info->chip->irq_set_affinity = imsic_irq_set_affinity;
+#endif
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ /* Is the target supported? */
+ switch (info->bus_token) {
+#ifdef CONFIG_RISCV_IMSIC_PCI
+ case DOMAIN_BUS_PCI_DEVICE_MSI:
+ case DOMAIN_BUS_PCI_DEVICE_MSIX:
+ info->chip->irq_mask = imsic_pci_mask_irq;
+ info->chip->irq_unmask = imsic_pci_unmask_irq;
+ break;
+#endif
+ case DOMAIN_BUS_DEVICE_MSI:
+ /*
+ * Per-device MSI should never have any MSI feature bits
+ * set. Its sole purpose is to create a dumb interrupt
+ * chip which has a device specific irq_write_msi_msg()
+ * callback.
+ */
+ if (WARN_ON_ONCE(info->flags))
+ return false;
+
+ /* Core managed MSI descriptors */
+ info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
+ MSI_FLAG_FREE_MSI_DESCS;
+ break;
+ case DOMAIN_BUS_WIRED_TO_MSI:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ /* Use hierarchical chip operations for re-trigger */
+ info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;
+
+ /*
+ * Mask out the domain specific MSI feature flags which are not
+ * supported by the real parent.
+ */
+ info->flags &= pops->supported_flags;
+
+ /* Enforce the required flags */
+ info->flags |= pops->required_flags;
+
+ return true;
+}
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+static const struct msi_parent_ops imsic_msi_parent_ops = {
+ .supported_flags = MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX,
+ .required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
+ .init_dev_msi_info = imsic_init_dev_msi_info,
+};
+
+int imsic_irqdomain_init(void)
+{
+ struct imsic_global_config *global;
+
+ if (!imsic || !imsic->fwnode) {
+ pr_err("early driver not probed\n");
+ return -ENODEV;
+ }
+
+ if (imsic->base_domain) {
+ pr_err("%pfwP: irq domain already created\n", imsic->fwnode);
+ return -ENODEV;
+ }
+
+ /* Create Base IRQ domain */
+ imsic->base_domain = irq_domain_create_tree(imsic->fwnode,
+ &imsic_base_domain_ops, imsic);
+ if (!imsic->base_domain) {
+ pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
+ return -ENOMEM;
+ }
+ imsic->base_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ imsic->base_domain->msi_parent_ops = &imsic_msi_parent_ops;
+
+ irq_domain_update_bus_token(imsic->base_domain, DOMAIN_BUS_NEXUS);
+
+ global = &imsic->global;
+ pr_info("%pfwP: hart-index-bits: %d, guest-index-bits: %d\n",
+ imsic->fwnode, global->hart_index_bits, global->guest_index_bits);
+ pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n",
+ imsic->fwnode, global->group_index_bits, global->group_index_shift);
+ pr_info("%pfwP: per-CPU IDs %d at base PPN %pa\n",
+ imsic->fwnode, global->nr_ids, &global->base_addr);
+ pr_info("%pfwP: total %d interrupts available\n",
+ imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1));
+
+ return 0;
+}
+
+static int imsic_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (imsic && imsic->fwnode != dev->fwnode) {
+ dev_err(dev, "fwnode mismatch\n");
+ return -ENODEV;
+ }
+
+ return imsic_irqdomain_init();
+}
+
+static const struct of_device_id imsic_platform_match[] = {
+ { .compatible = "riscv,imsics" },
+ {}
+};
+
+static struct platform_driver imsic_platform_driver = {
+ .driver = {
+ .name = "riscv-imsic",
+ .of_match_table = imsic_platform_match,
+ },
+ .probe = imsic_platform_probe,
+};
+builtin_platform_driver(imsic_platform_driver);
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
new file mode 100644
index 000000000000..5479f872e62b
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-imsic: " fmt
+#include <linux/cpu.h>
+#include <linux/bitmap.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <asm/hwcap.h>
+
+#include "irq-riscv-imsic-state.h"
+
+#define IMSIC_DISABLE_EIDELIVERY 0
+#define IMSIC_ENABLE_EIDELIVERY 1
+#define IMSIC_DISABLE_EITHRESHOLD 1
+#define IMSIC_ENABLE_EITHRESHOLD 0
+
+static inline void imsic_csr_write(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_write(CSR_IREG, val);
+}
+
+static inline unsigned long imsic_csr_read(unsigned long reg)
+{
+ csr_write(CSR_ISELECT, reg);
+ return csr_read(CSR_IREG);
+}
+
+static inline unsigned long imsic_csr_read_clear(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ return csr_read_clear(CSR_IREG, val);
+}
+
+static inline void imsic_csr_set(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_set(CSR_IREG, val);
+}
+
+static inline void imsic_csr_clear(unsigned long reg, unsigned long val)
+{
+ csr_write(CSR_ISELECT, reg);
+ csr_clear(CSR_IREG, val);
+}
+
+struct imsic_priv *imsic;
+
+const struct imsic_global_config *imsic_get_global_config(void)
+{
+ return imsic ? &imsic->global : NULL;
+}
+EXPORT_SYMBOL_GPL(imsic_get_global_config);
+
+static bool __imsic_eix_read_clear(unsigned long id, bool pend)
+{
+ unsigned long isel, imask;
+
+ isel = id / BITS_PER_LONG;
+ isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
+ isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
+ imask = BIT(id & (__riscv_xlen - 1));
+
+ return !!(imsic_csr_read_clear(isel, imask) & imask);
+}
+
+static inline bool __imsic_id_read_clear_enabled(unsigned long id)
+{
+ return __imsic_eix_read_clear(id, false);
+}
+
+static inline bool __imsic_id_read_clear_pending(unsigned long id)
+{
+ return __imsic_eix_read_clear(id, true);
+}
+
+void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val)
+{
+ unsigned long id = base_id, last_id = base_id + num_id;
+ unsigned long i, isel, ireg;
+
+ while (id < last_id) {
+ isel = id / BITS_PER_LONG;
+ isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
+ isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
+
+ /*
+ * Prepare the ID mask to be programmed in the
+ * IMSIC EIEx and EIPx registers. These registers
+ * are XLEN-wide and we must not touch IDs which
+ * are < base_id or >= (base_id + num_id).
+ */
+ ireg = 0;
+ for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) {
+ ireg |= BIT(i);
+ id++;
+ }
+
+ /*
+ * The IMSIC EIEx and EIPx registers are indirectly
+ * accessed using the ISELECT and IREG CSRs, so we
+ * need to access these CSRs without getting preempted.
+ *
+ * All existing callers invoke this function with
+ * local IRQs disabled, so we don't need to do
+ * anything special here.
+ */
+ if (val)
+ imsic_csr_set(isel, ireg);
+ else
+ imsic_csr_clear(isel, ireg);
+ }
+}
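
A worked example of the indexing above: on RV64 the 32-bit EIEx/EIPx registers are accessed as 64-bit pairs, so the iselect number advances in steps of two, and an update crossing a register boundary splits into two masked writes. Standalone C that replays the loop for IDs 62-65 (the IMSIC_EIE0 iselect value follows the AIA spec but should be treated as illustrative here):

#include <stdio.h>

#define XLEN		64
#define EIPX_BITS	32
#define IMSIC_EIE0	0xc0

int main(void)
{
	unsigned long id = 62, last_id = 66;	/* update IDs 62..65 */
	unsigned long i, isel, ireg;

	while (id < last_id) {
		isel = id / XLEN;
		isel *= XLEN / EIPX_BITS;	/* RV64 uses even registers only */
		isel += IMSIC_EIE0;

		ireg = 0;
		for (i = id & (XLEN - 1); id < last_id && i < XLEN; i++) {
			ireg |= 1UL << i;
			id++;
		}
		printf("iselect 0x%lx mask 0x%lx\n", isel, ireg);
	}
	return 0;	/* EIE0 gets mask 0xc000000000000000, EIE2 gets 0x3 */
}
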
+
+static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+{
+ struct imsic_local_config *mlocal;
+ struct imsic_vector *vec, *mvec;
+ int i;
+
+ lockdep_assert_held(&lpriv->lock);
+
+ for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) {
+ if (!i || i == IMSIC_IPI_ID)
+ goto skip;
+ vec = &lpriv->vectors[i];
+
+ if (READ_ONCE(vec->enable))
+ __imsic_id_set_enable(i);
+ else
+ __imsic_id_clear_enable(i);
+
+ /*
+ * If the ID was being moved to a new ID on some other CPU
+ * then we can get an MSI during the movement, so check the
+ * ID pending bit and re-trigger the new ID on the other CPU
+ * via an MMIO write.
+ */
+ mvec = READ_ONCE(vec->move);
+ WRITE_ONCE(vec->move, NULL);
+ if (mvec && mvec != vec) {
+ if (__imsic_id_read_clear_pending(i)) {
+ mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
+ writel_relaxed(mvec->local_id, mlocal->msi_va);
+ }
+
+ imsic_vector_free(&lpriv->vectors[i]);
+ }
+
+skip:
+ bitmap_clear(lpriv->dirty_bitmap, i, 1);
+ }
+}
+
+void imsic_local_sync_all(void)
+{
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+ bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+ __imsic_local_sync(lpriv);
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+void imsic_local_delivery(bool enable)
+{
+ if (enable) {
+ imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD);
+ imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY);
+ return;
+ }
+
+ imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY);
+ imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD);
+}
+
+#ifdef CONFIG_SMP
+static void imsic_local_timer_callback(struct timer_list *timer)
+{
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+ __imsic_local_sync(lpriv);
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+ lockdep_assert_held(&lpriv->lock);
+
+ /*
+ * The spinlock acquire/release semantics ensure that changes
+ * to vector enable, vector move and dirty bitmap are visible
+ * to the target CPU.
+ */
+
+ /*
+ * We schedule a timer on the target CPU if the target CPU is not
+ * the same as the current CPU. An offline CPU will unconditionally
+ * synchronize IDs through imsic_starting_cpu() when the
+ * CPU is brought up.
+ */
+ if (cpu_online(cpu)) {
+ if (cpu == smp_processor_id()) {
+ __imsic_local_sync(lpriv);
+ return;
+ }
+
+ if (!timer_pending(&lpriv->timer)) {
+ lpriv->timer.expires = jiffies + 1;
+ add_timer_on(&lpriv->timer, cpu);
+ }
+ }
+}
+#else
+static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+ lockdep_assert_held(&lpriv->lock);
+ __imsic_local_sync(lpriv);
+}
+#endif
+
+void imsic_vector_mask(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+ return;
+
+ /*
+ * This function is called from the Linux IRQ subsystem with
+ * IRQs disabled, so there is no need to save/restore IRQ flags.
+ */
+
+ raw_spin_lock(&lpriv->lock);
+
+ WRITE_ONCE(vec->enable, false);
+ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+ __imsic_remote_sync(lpriv, vec->cpu);
+
+ raw_spin_unlock(&lpriv->lock);
+}
+
+void imsic_vector_unmask(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+ return;
+
+ /*
+ * This function is called from the Linux IRQ subsystem with
+ * IRQs disabled, so there is no need to save/restore IRQ flags.
+ */
+
+ raw_spin_lock(&lpriv->lock);
+
+ WRITE_ONCE(vec->enable, true);
+ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+ __imsic_remote_sync(lpriv, vec->cpu);
+
+ raw_spin_unlock(&lpriv->lock);
+}
+
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
+ bool new_enable, struct imsic_vector *new_move)
+{
+ unsigned long flags;
+ bool enabled;
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+
+ /* Update enable and move details */
+ enabled = READ_ONCE(vec->enable);
+ WRITE_ONCE(vec->enable, new_enable);
+ WRITE_ONCE(vec->move, new_move);
+
+ /* Mark the vector as dirty and synchronize */
+ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
+ __imsic_remote_sync(lpriv, vec->cpu);
+
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+
+ return enabled;
+}
+
+void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec)
+{
+ struct imsic_local_priv *old_lpriv, *new_lpriv;
+ bool enabled;
+
+ if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu))
+ return;
+
+ old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu);
+ if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec))
+ return;
+
+ new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu);
+ if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec))
+ return;
+
+ /*
+ * Move and re-trigger the new vector based on the pending
+ * state of the old vector because we might get a device
+ * interrupt on the old vector while the device was being moved
+ * to the new vector.
+ */
+ enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
+ imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
+}
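
The move handshake can be replayed in miniature: imsic_vector_move() disables the old vector and points its ->move at the new one; when the old CPU next runs __imsic_local_sync(), a pending bit observed during the window is forwarded to the new CPU by an MMIO write. A toy single-threaded model of that sequence (all names and types are stand-ins, not driver API):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct vec {
	const char *name;
	bool enable;
	bool pending;		/* models the hardware EIP bit */
	struct vec *move;
};

/* Models the moved-vector branch of __imsic_local_sync() */
static void local_sync(struct vec *v)
{
	struct vec *mvec = v->move;

	v->move = NULL;
	if (mvec && mvec != v && v->pending) {
		v->pending = false;
		mvec->pending = true;	/* stands in for the MMIO retrigger */
		printf("forwarded pending IRQ from %s to %s\n",
		       v->name, mvec->name);
	}
}

int main(void)
{
	struct vec old_v = { "old", true,  false, NULL };
	struct vec new_v = { "new", false, false, NULL };

	/* imsic_vector_move(): disable old, record the move, enable new */
	old_v.enable = false;
	old_v.move = &new_v;
	new_v.enable = true;

	old_v.pending = true;	/* device fired during the move window */
	local_sync(&old_v);	/* old CPU notices and forwards it */
	return 0;
}
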
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind)
+{
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *mvec;
+ bool is_enabled;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
+ return;
+
+ is_enabled = imsic_vector_isenabled(vec);
+ mvec = imsic_vector_get_move(vec);
+
+ seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu);
+ seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id);
+ seq_printf(m, "%*sis_reserved : %5u\n", ind, "",
+ (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
+ seq_printf(m, "%*sis_enabled : %5u\n", ind, "", is_enabled ? 1 : 0);
+ seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ? 1 : 0);
+ if (mvec) {
+ seq_printf(m, "%*smove_cpu : %5u\n", ind, "", mvec->cpu);
+ seq_printf(m, "%*smove_local_id : %5u\n", ind, "", mvec->local_id);
+ }
+}
+
+void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
+{
+ irq_matrix_debug_show(m, imsic->matrix, ind);
+}
+#endif
+
+struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id)
+{
+ struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+
+ if (!lpriv || imsic->global.nr_ids < local_id)
+ return NULL;
+
+ return &lpriv->vectors[local_id];
+}
+
+struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
+{
+ struct imsic_vector *vec = NULL;
+ struct imsic_local_priv *lpriv;
+ unsigned long flags;
+ unsigned int cpu;
+ int local_id;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+ if (local_id < 0)
+ return NULL;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+ vec = &lpriv->vectors[local_id];
+ vec->hwirq = hwirq;
+ vec->enable = false;
+ vec->move = NULL;
+
+ return vec;
+}
+
+void imsic_vector_free(struct imsic_vector *vec)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ vec->hwirq = UINT_MAX;
+ irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+}
+
+static void __init imsic_local_cleanup(void)
+{
+ struct imsic_local_priv *lpriv;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+
+ bitmap_free(lpriv->dirty_bitmap);
+ kfree(lpriv->vectors);
+ }
+
+ free_percpu(imsic->lpriv);
+}
+
+static int __init imsic_local_init(void)
+{
+ struct imsic_global_config *global = &imsic->global;
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *vec;
+ int cpu, i;
+
+ /* Allocate per-CPU private state */
+ imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
+ if (!imsic->lpriv)
+ return -ENOMEM;
+
+ /* Setup per-CPU private state */
+ for_each_possible_cpu(cpu) {
+ lpriv = per_cpu_ptr(imsic->lpriv, cpu);
+
+ raw_spin_lock_init(&lpriv->lock);
+
+ /* Allocate dirty bitmap */
+ lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL);
+ if (!lpriv->dirty_bitmap)
+ goto fail_local_cleanup;
+
+#ifdef CONFIG_SMP
+ /* Setup lazy timer for synchronization */
+ timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED);
+#endif
+
+ /* Allocate vector array */
+ lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors),
+ GFP_KERNEL);
+ if (!lpriv->vectors)
+ goto fail_local_cleanup;
+
+ /* Setup vector array */
+ for (i = 0; i <= global->nr_ids; i++) {
+ vec = &lpriv->vectors[i];
+ vec->cpu = cpu;
+ vec->local_id = i;
+ vec->hwirq = UINT_MAX;
+ }
+ }
+
+ return 0;
+
+fail_local_cleanup:
+ imsic_local_cleanup();
+ return -ENOMEM;
+}
+
+void imsic_state_online(void)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ irq_matrix_online(imsic->matrix);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+}
+
+void imsic_state_offline(void)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
+ irq_matrix_offline(imsic->matrix);
+ raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
+
+#ifdef CONFIG_SMP
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
+
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+ WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0);
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+#endif
+}
+
+static int __init imsic_matrix_init(void)
+{
+ struct imsic_global_config *global = &imsic->global;
+
+ raw_spin_lock_init(&imsic->matrix_lock);
+ imsic->matrix = irq_alloc_matrix(global->nr_ids + 1,
+ 0, global->nr_ids + 1);
+ if (!imsic->matrix)
+ return -ENOMEM;
+
+ /* Reserve ID#0 because it is special and never implemented */
+ irq_matrix_assign_system(imsic->matrix, 0, false);
+
+ /* Reserve IPI ID because it is special and used internally */
+ irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false);
+
+ return 0;
+}
+
+static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode,
+ u32 index, unsigned long *hartid)
+{
+ struct of_phandle_args parent;
+ int rc;
+
+ /*
+ * Currently, only an OF fwnode is supported; extend this
+ * function when adding ACPI support.
+ */
+ if (!is_of_node(fwnode))
+ return -EINVAL;
+
+ rc = of_irq_parse_one(to_of_node(fwnode), index, &parent);
+ if (rc)
+ return rc;
+
+ /*
+ * Skip interrupts other than external interrupts for
+ * current privilege level.
+ */
+ if (parent.args[0] != RV_IRQ_EXT)
+ return -EINVAL;
+
+ return riscv_of_parent_hartid(parent.np, hartid);
+}
+
+static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode,
+ u32 index, struct resource *res)
+{
+ /*
+ * Currently, only an OF fwnode is supported; extend this
+ * function when adding ACPI support.
+ */
+ if (!is_of_node(fwnode))
+ return -EINVAL;
+
+ return of_address_to_resource(to_of_node(fwnode), index, res);
+}
+
+static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
+ struct imsic_global_config *global,
+ u32 *nr_parent_irqs,
+ u32 *nr_mmios)
+{
+ unsigned long hartid;
+ struct resource res;
+ int rc;
+ u32 i;
+
+ /*
+ * Currently, only an OF fwnode is supported; extend this
+ * function when adding ACPI support.
+ */
+ if (!is_of_node(fwnode))
+ return -EINVAL;
+
+ *nr_parent_irqs = 0;
+ *nr_mmios = 0;
+
+ /* Find number of parent interrupts */
+ while (!imsic_get_parent_hartid(fwnode, *nr_parent_irqs, &hartid))
+ (*nr_parent_irqs)++;
+ if (!*nr_parent_irqs) {
+ pr_err("%pfwP: no parent irqs available\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Find number of guest index bits in MSI address */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits",
+ &global->guest_index_bits);
+ if (rc)
+ global->guest_index_bits = 0;
+
+ /* Find number of HART index bits */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits",
+ &global->hart_index_bits);
+ if (rc) {
+ /* Assume default value */
+ global->hart_index_bits = __fls(*nr_parent_irqs);
+ if (BIT(global->hart_index_bits) < *nr_parent_irqs)
+ global->hart_index_bits++;
+ }
+
+ /* Find number of group index bits */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits",
+ &global->group_index_bits);
+ if (rc)
+ global->group_index_bits = 0;
+
+ /*
+ * Find the first bit position of the group index.
+ * If not specified, assume the default APLIC-IMSIC configuration.
+ */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift",
+ &global->group_index_shift);
+ if (rc)
+ global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2;
+
+ /* Find number of interrupt identities */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids",
+ &global->nr_ids);
+ if (rc) {
+ pr_err("%pfwP: number of interrupt identities not found\n", fwnode);
+ return rc;
+ }
+
+ /* Find number of guest interrupt identities */
+ rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids",
+ &global->nr_guest_ids);
+ if (rc)
+ global->nr_guest_ids = global->nr_ids;
+
+ /* Sanity check guest index bits */
+ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT;
+ if (i < global->guest_index_bits) {
+ pr_err("%pfwP: guest index bits too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check HART index bits */
+ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits;
+ if (i < global->hart_index_bits) {
+ pr_err("%pfwP: HART index bits too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check group index bits */
+ i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT -
+ global->guest_index_bits - global->hart_index_bits;
+ if (i < global->group_index_bits) {
+ pr_err("%pfwP: group index bits too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check group index shift */
+ i = global->group_index_bits + global->group_index_shift - 1;
+ if (i >= BITS_PER_LONG) {
+ pr_err("%pfwP: group index shift too big\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check number of interrupt identities */
+ if (global->nr_ids < IMSIC_MIN_ID ||
+ global->nr_ids >= IMSIC_MAX_ID ||
+ (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
+ pr_err("%pfwP: invalid number of interrupt identities\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Sanity check number of guest interrupt identities */
+ if (global->nr_guest_ids < IMSIC_MIN_ID ||
+ global->nr_guest_ids >= IMSIC_MAX_ID ||
+ (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
+ pr_err("%pfwP: invalid number of guest interrupt identities\n", fwnode);
+ return -EINVAL;
+ }
+
+ /* Compute base address */
+ rc = imsic_get_mmio_resource(fwnode, 0, &res);
+ if (rc) {
+ pr_err("%pfwP: first MMIO resource not found\n", fwnode);
+ return -EINVAL;
+ }
+ global->base_addr = res.start;
+ global->base_addr &= ~(BIT(global->guest_index_bits +
+ global->hart_index_bits +
+ IMSIC_MMIO_PAGE_SHIFT) - 1);
+ global->base_addr &= ~((BIT(global->group_index_bits) - 1) <<
+ global->group_index_shift);
+
+ /* Find number of MMIO register sets */
+ while (!imsic_get_mmio_resource(fwnode, *nr_mmios, &res))
+ (*nr_mmios)++;
+
+ return 0;
+}
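
One default worth a worked example: when "riscv,hart-index-bits" is absent, the code above derives ceil(log2(nr_parent_irqs)) with __fls() plus a round-up. Standalone sketch (the fls helper is re-implemented here only so the example compiles outside the kernel):

#include <stdio.h>

/* Like the kernel's __fls(): index of the most significant set bit */
static unsigned int fls_ul(unsigned long x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long nr_parent_irqs = 5;
	unsigned int bits = fls_ul(nr_parent_irqs);

	if ((1UL << bits) < nr_parent_irqs)
		bits++;
	printf("%lu parent irqs -> %u hart-index-bits\n", nr_parent_irqs, bits);
	return 0;	/* 5 -> 3, since 2^2 = 4 < 5 */
}
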
+
+int __init imsic_setup_state(struct fwnode_handle *fwnode)
+{
+ u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0;
+ struct imsic_global_config *global;
+ struct imsic_local_config *local;
+ void __iomem **mmios_va = NULL;
+ struct resource *mmios = NULL;
+ unsigned long reloff, hartid;
+ phys_addr_t base_addr;
+ int rc, cpu;
+
+ /*
+ * Only one IMSIC instance is allowed per platform for a clean
+ * implementation of SMP IRQ affinity and per-CPU IPIs.
+ *
+ * This means that on a multi-socket (or multi-die) platform we
+ * will have multiple MMIO regions for one IMSIC instance.
+ */
+ if (imsic) {
+ pr_err("%pfwP: already initialized hence ignoring\n", fwnode);
+ return -EALREADY;
+ }
+
+ if (!riscv_isa_extension_available(NULL, SxAIA)) {
+ pr_err("%pfwP: AIA support not available\n", fwnode);
+ return -ENODEV;
+ }
+
+ imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
+ if (!imsic)
+ return -ENOMEM;
+ imsic->fwnode = fwnode;
+ global = &imsic->global;
+
+ global->local = alloc_percpu(typeof(*global->local));
+ if (!global->local) {
+ rc = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ /* Parse IMSIC fwnode */
+ rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios);
+ if (rc)
+ goto out_free_local;
+
+ /* Allocate MMIO resource array */
+ mmios = kcalloc(nr_mmios, sizeof(*mmios), GFP_KERNEL);
+ if (!mmios) {
+ rc = -ENOMEM;
+ goto out_free_local;
+ }
+
+ /* Allocate MMIO virtual address array */
+ mmios_va = kcalloc(nr_mmios, sizeof(*mmios_va), GFP_KERNEL);
+ if (!mmios_va) {
+ rc = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ /* Parse and map MMIO register sets */
+ for (i = 0; i < nr_mmios; i++) {
+ rc = imsic_get_mmio_resource(fwnode, i, &mmios[i]);
+ if (rc) {
+ pr_err("%pfwP: unable to parse MMIO regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+
+ base_addr = mmios[i].start;
+ base_addr &= ~(BIT(global->guest_index_bits +
+ global->hart_index_bits +
+ IMSIC_MMIO_PAGE_SHIFT) - 1);
+ base_addr &= ~((BIT(global->group_index_bits) - 1) <<
+ global->group_index_shift);
+ if (base_addr != global->base_addr) {
+ rc = -EINVAL;
+ pr_err("%pfwP: address mismatch for regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+
+ mmios_va[i] = ioremap(mmios[i].start, resource_size(&mmios[i]));
+ if (!mmios_va[i]) {
+ rc = -EIO;
+ pr_err("%pfwP: unable to map MMIO regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+ }
+
+ /* Initialize local (or per-CPU) state */
+ rc = imsic_local_init();
+ if (rc) {
+ pr_err("%pfwP: failed to initialize local state\n",
+ fwnode);
+ goto out_iounmap;
+ }
+
+ /* Configure handlers for target CPUs */
+ for (i = 0; i < nr_parent_irqs; i++) {
+ rc = imsic_get_parent_hartid(fwnode, i, &hartid);
+ if (rc) {
+ pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i);
+ continue;
+ }
+
+ cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ pr_warn("%pfwP: invalid cpuid for parent irq%d\n", fwnode, i);
+ continue;
+ }
+
+ /* Find MMIO location of MSI page */
+ index = nr_mmios;
+ reloff = i * BIT(global->guest_index_bits) *
+ IMSIC_MMIO_PAGE_SZ;
+ for (j = 0; j < nr_mmios; j++) {
+ if (reloff < resource_size(&mmios[j])) {
+ index = j;
+ break;
+ }
+
+ /*
+ * MMIO region size may not be aligned to
+ * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ
+ * if holes are present.
+ */
+ reloff -= ALIGN(resource_size(&mmios[j]),
+ BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ);
+ }
+ if (index >= nr_mmios) {
+ pr_warn("%pfwP: MMIO not found for parent irq%d\n", fwnode, i);
+ continue;
+ }
+
+ local = per_cpu_ptr(global->local, cpu);
+ local->msi_pa = mmios[index].start + reloff;
+ local->msi_va = mmios_va[index] + reloff;
+
+ nr_handlers++;
+ }
+
+ /* If no CPU handlers were found then we can't take interrupts */
+ if (!nr_handlers) {
+ pr_err("%pfwP: No CPU handlers found\n", fwnode);
+ rc = -ENODEV;
+ goto out_local_cleanup;
+ }
+
+ /* Initialize matrix allocator */
+ rc = imsic_matrix_init();
+ if (rc) {
+ pr_err("%pfwP: failed to create matrix allocator\n", fwnode);
+ goto out_local_cleanup;
+ }
+
+ /* We don't need the MMIO arrays anymore, so free them up */
+ kfree(mmios_va);
+ kfree(mmios);
+
+ return 0;
+
+out_local_cleanup:
+ imsic_local_cleanup();
+out_iounmap:
+ for (i = 0; i < nr_mmios; i++) {
+ if (mmios_va && mmios_va[i])
+ iounmap(mmios_va[i]);
+ }
+ kfree(mmios_va);
+ kfree(mmios);
+out_free_local:
+ free_percpu(imsic->global.local);
+out_free_priv:
+ kfree(imsic);
+ imsic = NULL;
+ return rc;
+}
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
new file mode 100644
index 000000000000..5ae2f69b035b
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _IRQ_RISCV_IMSIC_STATE_H
+#define _IRQ_RISCV_IMSIC_STATE_H
+
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/irqdomain.h>
+#include <linux/fwnode.h>
+#include <linux/timer.h>
+
+#define IMSIC_IPI_ID 1
+#define IMSIC_NR_IPI 8
+
+struct imsic_vector {
+ /* Fixed details of the vector */
+ unsigned int cpu;
+ unsigned int local_id;
+ /* Details saved by driver in the vector */
+ unsigned int hwirq;
+ /* Details accessed using local lock held */
+ bool enable;
+ struct imsic_vector *move;
+};
+
+struct imsic_local_priv {
+ /* Local lock to protect vector enable/move variables and dirty bitmap */
+ raw_spinlock_t lock;
+
+ /* Local dirty bitmap for synchronization */
+ unsigned long *dirty_bitmap;
+
+#ifdef CONFIG_SMP
+ /* Local timer for synchronization */
+ struct timer_list timer;
+#endif
+
+ /* Local vector table */
+ struct imsic_vector *vectors;
+};
+
+struct imsic_priv {
+ /* Device details */
+ struct fwnode_handle *fwnode;
+
+ /* Global configuration common for all HARTs */
+ struct imsic_global_config global;
+
+ /* Per-CPU state */
+ struct imsic_local_priv __percpu *lpriv;
+
+ /* State of IRQ matrix allocator */
+ raw_spinlock_t matrix_lock;
+ struct irq_matrix *matrix;
+
+ /* IRQ domains (created by platform driver) */
+ struct irq_domain *base_domain;
+};
+
+extern struct imsic_priv *imsic;
+
+void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val);
+
+static inline void __imsic_id_set_enable(unsigned long id)
+{
+ __imsic_eix_update(id, 1, false, true);
+}
+
+static inline void __imsic_id_clear_enable(unsigned long id)
+{
+ __imsic_eix_update(id, 1, false, false);
+}
+
+void imsic_local_sync_all(void);
+void imsic_local_delivery(bool enable);
+
+void imsic_vector_mask(struct imsic_vector *vec);
+void imsic_vector_unmask(struct imsic_vector *vec);
+
+static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
+{
+ return READ_ONCE(vec->enable);
+}
+
+static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
+{
+ return READ_ONCE(vec->move);
+}
+
+void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
+
+struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id);
+
+struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask);
+void imsic_vector_free(struct imsic_vector *vector);
+
+void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind);
+void imsic_vector_debug_show_summary(struct seq_file *m, int ind);
+
+void imsic_state_online(void);
+void imsic_state_offline(void);
+int imsic_setup_state(struct fwnode_handle *fwnode);
+int imsic_irqdomain_init(void);
+
+#endif
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index f3d4cb9e34f7..8fb183ced1e7 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -164,15 +164,12 @@ static int plic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
unsigned int cpu;
- struct cpumask amask;
struct plic_priv *priv = irq_data_get_irq_chip_data(d);
- cpumask_and(&amask, &priv->lmask, mask_val);
-
if (force)
- cpu = cpumask_first(&amask);
+ cpu = cpumask_first_and(&priv->lmask, mask_val);
else
- cpu = cpumask_any_and(&amask, cpu_online_mask);
+ cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);
if (cpu >= nr_cpu_ids)
return -EINVAL;
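
The helpers used in the rewrite compute the first CPU in an intersection without materializing a temporary cpumask on the stack: cpumask_first_and(a, b) yields the first set bit of a & b, and cpumask_first_and_and(a, b, c) the first set bit of a & b & c. A toy model with 64-bit words standing in for cpumasks (not the kernel implementation):

#include <stdio.h>

#define NR_CPUS	64

/* Toy cpumask_first_and_and(): first bit of (a & b & c), else NR_CPUS */
static int first_and_and(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a & b & c;

	return m ? __builtin_ctzl(m) : NR_CPUS;
}

int main(void)
{
	unsigned long lmask = 0x0f, affinity = 0x0c, online = 0x05;

	printf("selected cpu %d\n", first_and_and(lmask, affinity, online));
	return 0;	/* 0x0f & 0x0c & 0x05 = 0x04 -> cpu 2 */
}
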
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 26a5193d0ae4..2cc9f3b7d669 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -19,13 +19,26 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
-#include <linux/syscore_ops.h>
+#include <linux/pm.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#define IRQS_PER_BANK 32
+#define IRQS_PER_BANK 32
-#define HWSPNLCK_TIMEOUT 1000 /* usec */
+#define HWSPNLCK_TIMEOUT 1000 /* usec */
+
+#define EXTI_EnCIDCFGR(n) (0x180 + (n) * 4)
+#define EXTI_HWCFGR1 0x3f0
+
+/* Register: EXTI_EnCIDCFGR(n) */
+#define EXTI_CIDCFGR_CFEN_MASK BIT(0)
+#define EXTI_CIDCFGR_CID_MASK GENMASK(6, 4)
+#define EXTI_CIDCFGR_CID_SHIFT 4
+
+/* Register: EXTI_HWCFGR1 */
+#define EXTI_HWCFGR1_CIDWIDTH_MASK GENMASK(27, 24)
+
+#define EXTI_CID1 1
struct stm32_exti_bank {
u32 imr_ofst;
@@ -36,6 +49,7 @@ struct stm32_exti_bank {
u32 rpr_ofst;
u32 fpr_ofst;
u32 trg_ofst;
+ u32 seccfgr_ofst;
};
#define UNDEF_REG ~0
@@ -54,17 +68,18 @@ struct stm32_exti_chip_data {
u32 mask_cache;
u32 rtsr_cache;
u32 ftsr_cache;
+ u32 event_reserved;
};
struct stm32_exti_host_data {
void __iomem *base;
+ struct device *dev;
struct stm32_exti_chip_data *chips_data;
const struct stm32_exti_drv_data *drv_data;
struct hwspinlock *hwlock;
+ bool dt_has_irqs_desc; /* skip internal desc_irqs array and get it from DT */
};
-static struct stm32_exti_host_data *stm32_host_data;
-
static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
.imr_ofst = 0x00,
.emr_ofst = 0x04,
@@ -74,6 +89,7 @@ static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
.rpr_ofst = 0x14,
.fpr_ofst = UNDEF_REG,
.trg_ofst = UNDEF_REG,
+ .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
@@ -94,6 +110,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
.rpr_ofst = 0x88,
.fpr_ofst = UNDEF_REG,
.trg_ofst = UNDEF_REG,
+ .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
@@ -105,6 +122,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
.rpr_ofst = 0x98,
.fpr_ofst = UNDEF_REG,
.trg_ofst = UNDEF_REG,
+ .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
@@ -116,6 +134,7 @@ static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
.rpr_ofst = 0xA8,
.fpr_ofst = UNDEF_REG,
.trg_ofst = UNDEF_REG,
+ .seccfgr_ofst = UNDEF_REG,
};
static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
@@ -138,6 +157,7 @@ static const struct stm32_exti_bank stm32mp1_exti_b1 = {
.rpr_ofst = 0x0C,
.fpr_ofst = 0x10,
.trg_ofst = 0x3EC,
+ .seccfgr_ofst = 0x14,
};
static const struct stm32_exti_bank stm32mp1_exti_b2 = {
@@ -149,6 +169,7 @@ static const struct stm32_exti_bank stm32mp1_exti_b2 = {
.rpr_ofst = 0x2C,
.fpr_ofst = 0x30,
.trg_ofst = 0x3E8,
+ .seccfgr_ofst = 0x34,
};
static const struct stm32_exti_bank stm32mp1_exti_b3 = {
@@ -160,6 +181,7 @@ static const struct stm32_exti_bank stm32mp1_exti_b3 = {
.rpr_ofst = 0x4C,
.fpr_ofst = 0x50,
.trg_ofst = 0x3E4,
+ .seccfgr_ofst = 0x54,
};
static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
@@ -322,7 +344,7 @@ static void stm32_irq_handler(struct irq_desc *desc)
while ((pending = stm32_exti_pending(gc))) {
for_each_set_bit(n, &pending, IRQS_PER_BANK)
generic_handle_domain_irq(domain, irq_base + n);
- }
+ }
}
chained_irq_exit(chip, desc);
@@ -621,50 +643,32 @@ static int stm32_exti_h_set_affinity(struct irq_data *d,
return IRQ_SET_MASK_OK_DONE;
}
-static int __maybe_unused stm32_exti_h_suspend(void)
+static int stm32_exti_h_suspend(struct device *dev)
{
+ struct stm32_exti_host_data *host_data = dev_get_drvdata(dev);
struct stm32_exti_chip_data *chip_data;
int i;
- for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
- chip_data = &stm32_host_data->chips_data[i];
- raw_spin_lock(&chip_data->rlock);
+ for (i = 0; i < host_data->drv_data->bank_nr; i++) {
+ chip_data = &host_data->chips_data[i];
stm32_chip_suspend(chip_data, chip_data->wake_active);
- raw_spin_unlock(&chip_data->rlock);
}
return 0;
}
-static void __maybe_unused stm32_exti_h_resume(void)
+static int stm32_exti_h_resume(struct device *dev)
{
+ struct stm32_exti_host_data *host_data = dev_get_drvdata(dev);
struct stm32_exti_chip_data *chip_data;
int i;
- for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
- chip_data = &stm32_host_data->chips_data[i];
- raw_spin_lock(&chip_data->rlock);
+ for (i = 0; i < host_data->drv_data->bank_nr; i++) {
+ chip_data = &host_data->chips_data[i];
stm32_chip_resume(chip_data, chip_data->mask_cache);
- raw_spin_unlock(&chip_data->rlock);
}
-}
-static struct syscore_ops stm32_exti_h_syscore_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = stm32_exti_h_suspend,
- .resume = stm32_exti_h_resume,
-#endif
-};
-
-static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data)
-{
- stm32_host_data = host_data;
- register_syscore_ops(&stm32_exti_h_syscore_ops);
-}
-
-static void stm32_exti_h_syscore_deinit(void)
-{
- unregister_syscore_ops(&stm32_exti_h_syscore_ops);
+ return 0;
}
static int stm32_exti_h_retrigger(struct irq_data *d)
@@ -725,12 +729,35 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
bank = hwirq / IRQS_PER_BANK;
chip_data = &host_data->chips_data[bank];
+ /* Check if event is reserved (Secure) */
+ if (chip_data->event_reserved & BIT(hwirq % IRQS_PER_BANK)) {
+ dev_err(host_data->dev, "event %lu is reserved, secure\n", hwirq);
+ return -EPERM;
+ }
+
event_trg = readl_relaxed(host_data->base + chip_data->reg_bank->trg_ofst);
chip = (event_trg & BIT(hwirq % IRQS_PER_BANK)) ?
&stm32_exti_h_chip : &stm32_exti_h_chip_direct;
irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
+ if (host_data->dt_has_irqs_desc) {
+ struct of_phandle_args out_irq;
+ int ret;
+
+ ret = of_irq_parse_one(host_data->dev->of_node, hwirq, &out_irq);
+ if (ret)
+ return ret;
+ /* we only support one parent, so far */
+ if (of_node_to_fwnode(out_irq.np) != dm->parent->fwnode)
+ return -EINVAL;
+
+ of_phandle_args_to_fwspec(out_irq.np, out_irq.args,
+ out_irq.args_count, &p_fwspec);
+
+ return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
+ }
+
if (!host_data->drv_data->desc_irqs)
return -EINVAL;
@@ -771,8 +798,6 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
goto free_chips_data;
}
- stm32_host_data = host_data;
-
return host_data;
free_chips_data:
@@ -807,6 +832,10 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
if (stm32_bank->emr_ofst != UNDEF_REG)
writel_relaxed(0, base + stm32_bank->emr_ofst);
+ /* reserve Secure events */
+ if (stm32_bank->seccfgr_ofst != UNDEF_REG)
+ chip_data->event_reserved = readl_relaxed(base + stm32_bank->seccfgr_ofst);
+
pr_info("%pOF: bank%d\n", node, bank_idx);
return chip_data;
@@ -891,6 +920,27 @@ static const struct irq_domain_ops stm32_exti_h_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
+static void stm32_exti_check_rif(struct stm32_exti_host_data *host_data)
+{
+ unsigned int bank, i, event;
+ u32 cid, cidcfgr, hwcfgr1;
+
+ /* quit if CID filtering is not supported */
+ hwcfgr1 = readl_relaxed(host_data->base + EXTI_HWCFGR1);
+ if ((hwcfgr1 & EXTI_HWCFGR1_CIDWIDTH_MASK) == 0)
+ return;
+
+ for (bank = 0; bank < host_data->drv_data->bank_nr; bank++) {
+ for (i = 0; i < IRQS_PER_BANK; i++) {
+ event = bank * IRQS_PER_BANK + i;
+ cidcfgr = readl_relaxed(host_data->base + EXTI_EnCIDCFGR(event));
+ cid = (cidcfgr & EXTI_CIDCFGR_CID_MASK) >> EXTI_CIDCFGR_CID_SHIFT;
+ if ((cidcfgr & EXTI_CIDCFGR_CFEN_MASK) && cid != EXTI_CID1)
+ host_data->chips_data[bank].event_reserved |= BIT(i);
+ }
+ }
+}
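
Decoding one EXTI_EnCIDCFGR value by hand shows what the loop filters: if filtering is enabled (CFEN set) and the CID is anything other than CID1 (the compartment Linux runs in), the event is marked reserved. Standalone sketch with a made-up register value:

#include <stdio.h>

#define CFEN_MASK	0x01U		/* BIT(0) */
#define CID_MASK	0x70U		/* GENMASK(6, 4) */
#define CID_SHIFT	4
#define EXTI_CID1	1

int main(void)
{
	unsigned int cidcfgr = 0x21;	/* made-up: CFEN = 1, CID = 2 */
	unsigned int cid = (cidcfgr & CID_MASK) >> CID_SHIFT;

	if ((cidcfgr & CFEN_MASK) && cid != EXTI_CID1)
		printf("event reserved for CID %u, hidden from Linux\n", cid);
	return 0;
}
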
+
static void stm32_exti_remove_irq(void *data)
{
struct irq_domain *domain = data;
@@ -898,11 +948,6 @@ static void stm32_exti_remove_irq(void *data)
irq_domain_remove(domain);
}
-static void stm32_exti_remove(struct platform_device *pdev)
-{
- stm32_exti_h_syscore_deinit();
-}
-
static int stm32_exti_probe(struct platform_device *pdev)
{
int ret, i;
@@ -916,6 +961,9 @@ static int stm32_exti_probe(struct platform_device *pdev)
if (!host_data)
return -ENOMEM;
+ dev_set_drvdata(dev, host_data);
+ host_data->dev = dev;
+
/* check for optional hwspinlock which may be not available yet */
ret = of_hwspin_lock_get_id(np, 0);
if (ret == -EPROBE_DEFER)
@@ -955,6 +1003,8 @@ static int stm32_exti_probe(struct platform_device *pdev)
for (i = 0; i < drv_data->bank_nr; i++)
stm32_exti_chip_init(host_data, i, np);
+ stm32_exti_check_rif(host_data);
+
parent_domain = irq_find_host(of_irq_find_parent(np));
if (!parent_domain) {
dev_err(dev, "GIC interrupt-parent not found\n");
@@ -975,7 +1025,8 @@ static int stm32_exti_probe(struct platform_device *pdev)
if (ret)
return ret;
- stm32_exti_h_syscore_init(host_data);
+ if (of_property_read_bool(np, "interrupts-extended"))
+ host_data->dt_has_irqs_desc = true;
return 0;
}
@@ -988,12 +1039,16 @@ static const struct of_device_id stm32_exti_ids[] = {
};
MODULE_DEVICE_TABLE(of, stm32_exti_ids);
+static const struct dev_pm_ops stm32_exti_dev_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_exti_h_suspend, stm32_exti_h_resume)
+};
+
static struct platform_driver stm32_exti_driver = {
.probe = stm32_exti_probe,
- .remove_new = stm32_exti_remove,
.driver = {
.name = "stm32_exti",
.of_match_table = stm32_exti_ids,
+ .pm = &stm32_exti_dev_pm_ops,
},
};
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index e760b1278143..bb92fd85e975 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -192,7 +192,6 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].regs.type = reg_offs->ctrl;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
- gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 680586354d12..d59bfbe8c6d0 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -150,7 +150,6 @@ static int __init of_tb10x_init_irq(struct device_node *ictl,
gc->chip_types[0].regs.mask = AB_IRQCTL_INT_ENABLE;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
- gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 2776ca5fc33f..b215b28cad7b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -401,23 +401,23 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
static int data_sock_setsockopt(struct socket *sock, int level, int optname,
- sockptr_t optval, unsigned int len)
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0, opt = 0;
if (*debug & DEBUG_SOCKET)
printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock,
- level, optname, len);
+ level, optname, optlen);
lock_sock(sk);
switch (optname) {
case MISDN_TIME_STAMP:
- if (copy_from_sockptr(&opt, optval, sizeof(int))) {
- err = -EFAULT;
+ err = copy_safe_from_sockptr(&opt, sizeof(opt),
+ optval, optlen);
+ if (err)
break;
- }
if (opt)
_pms(sk)->cmask |= MISDN_TIME_STAMP;
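
The copy_safe_from_sockptr() conversion matters because the old code copied sizeof(int) bytes regardless of the user-supplied optlen, reading past short user buffers. The helper's behavior is roughly the following (a simplified sketch assuming the <linux/sockptr.h> context; not a verbatim copy of the kernel implementation):

/* Sketch: refuse user buffers shorter than the kernel object before
 * copying, instead of blindly reading ksize bytes. */
static inline int copy_safe_from_sockptr_sketch(void *dst, size_t ksize,
						sockptr_t optval,
						unsigned int optlen)
{
	if (optlen < ksize)
		return -EINVAL;
	return copy_from_sockptr(dst, optval, ksize);
}
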
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index db9270da5b8e..b6ddf1d47cb4 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -140,24 +140,19 @@ static int macii_probe(void)
/* Initialize the driver */
static int macii_init(void)
{
- unsigned long flags;
int err;
- local_irq_save(flags);
-
err = macii_init_via();
if (err)
- goto out;
+ return err;
err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
macii_interrupt);
if (err)
- goto out;
+ return err;
macii_state = idle;
-out:
- local_irq_restore(flags);
- return err;
+ return 0;
}
/* initialize the hardware */
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 2bba4d6aaaa2..463eb13bd0b2 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -54,7 +54,7 @@ void bch_dump_bucket(struct btree_keys *b)
int __bch_count_data(struct btree_keys *b)
{
unsigned int ret = 0;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k;
if (b->ops->is_extents)
@@ -67,7 +67,7 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
va_list args;
struct bkey *k, *p = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
const char *err;
for_each_key(b, k, &iter) {
@@ -879,7 +879,7 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey preceding_key_on_stack = ZERO_KEY;
struct bkey *preceding_key_p = &preceding_key_on_stack;
@@ -895,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
else
preceding_key(k, &preceding_key_p);
- m = bch_btree_iter_init(b, &iter, preceding_key_p);
+ m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
- if (b->ops->insert_fixup(b, k, &iter, replace_key))
+ if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
return status;
status = BTREE_INSERT_STATUS_INSERT;
@@ -1100,33 +1100,33 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
btree_iter_cmp));
}
-static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
- struct bkey *search,
- struct bset_tree *start)
+static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search,
+ struct bset_tree *start)
{
struct bkey *ret = NULL;
- iter->size = ARRAY_SIZE(iter->data);
- iter->used = 0;
+ iter->iter.size = ARRAY_SIZE(iter->stack_data);
+ iter->iter.used = 0;
#ifdef CONFIG_BCACHE_DEBUG
- iter->b = b;
+ iter->iter.b = b;
#endif
for (; start <= bset_tree_last(b); start++) {
ret = bch_bset_search(b, start, search);
- bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
+ bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
}
return ret;
}
-struct bkey *bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
+struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
struct bkey *search)
{
- return __bch_btree_iter_init(b, iter, search, b->set);
+ return __bch_btree_iter_stack_init(b, iter, search, b->set);
}
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
@@ -1293,10 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
int oldsize = bch_count_data(b);
- __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
+ __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
if (start) {
unsigned int i;
@@ -1307,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
order = get_order(__set_bytes(b->set->data, keys));
}
- __btree_sort(b, &iter, start, order, false, state);
+ __btree_sort(b, &iter.iter, start, order, false, state);
EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
@@ -1323,11 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- bch_btree_iter_init(b, &iter, NULL);
+ bch_btree_iter_stack_init(b, &iter, NULL);
- btree_mergesort(b, new->set->data, &iter, false, true);
+ btree_mergesort(b, new->set->data, &iter.iter, false, true);
bch_time_stats_update(&state->time, start_time);
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index d795c84246b0..011f6062c4c0 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -321,7 +321,14 @@ struct btree_iter {
#endif
struct btree_iter_set {
struct bkey *k, *end;
- } data[MAX_BSETS];
+ } data[];
+};
+
+/* Fixed-size btree_iter that can be allocated on the stack */
+
+struct btree_iter_stack {
+ struct btree_iter iter;
+ struct btree_iter_set stack_data[MAX_BSETS];
};
typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
@@ -333,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end);
-struct bkey *bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
- struct bkey *search);
+struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search);
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search);
@@ -350,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
return search ? __bch_bset_search(b, t, search) : t->data->start;
}
-#define for_each_key_filter(b, k, iter, filter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+#define for_each_key_filter(b, k, stack_iter, filter) \
+ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
+ ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
+ filter));)
-#define for_each_key(b, k, iter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next(iter));)
+#define for_each_key(b, k, stack_iter) \
+ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
+ ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
/* Sorting */
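With the macro changes above, call sites declare a btree_iter_stack and hand its address to the macro, which dereferences ->iter internally. A hedged usage sketch, mirroring btree_gc_count_keys() from the next file:

static unsigned int count_good_keys_sketch(struct btree_keys *keys)
{
	struct btree_iter_stack iter;	/* fixed-size, lives on the stack */
	struct bkey *k;
	unsigned int n = 0;

	/* bch_ptr_bad filters out keys whose pointers are stale/invalid */
	for_each_key_filter(keys, k, &iter, bch_ptr_bad)
		n++;

	return n;
}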
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 196cdacce38f..d011a7154d33 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1309,7 +1309,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
uint8_t stale = 0;
unsigned int keys = 0, good_keys = 0;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bset_tree *t;
gc->nodes++;
@@ -1570,7 +1570,7 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
static unsigned int btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
unsigned int ret = 0;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
@@ -1611,17 +1611,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
int ret = 0;
bool should_rewrite;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct gc_merge_info r[GC_MERGE_NODES];
struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
- bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
+ bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
for (i = r; i < r + ARRAY_SIZE(r); i++)
i->b = ERR_PTR(-EINTR);
while (1) {
- k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
+ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad);
if (k) {
r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
true, b);
@@ -1911,7 +1912,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
int ret = 0;
struct bkey *k, *p = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
bch_initial_mark_key(b->c, b->level, k);
@@ -1919,10 +1920,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
bch_initial_mark_key(b->c, b->level + 1, &b->key);
if (b->level) {
- bch_btree_iter_init(&b->keys, &iter, NULL);
+ bch_btree_iter_stack_init(&b->keys, &iter, NULL);
do {
- k = bch_btree_iter_next_filter(&iter, &b->keys,
+ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad);
if (k) {
btree_node_prefetch(b, k);
@@ -1950,7 +1951,7 @@ static int bch_btree_check_thread(void *arg)
struct btree_check_info *info = arg;
struct btree_check_state *check_state = info->state;
struct cache_set *c = check_state->c;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
@@ -1959,8 +1960,8 @@ static int bch_btree_check_thread(void *arg)
ret = 0;
/* root node keys are checked before thread created */
- bch_btree_iter_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -1978,7 +1979,7 @@ static int bch_btree_check_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter,
+ k = bch_btree_iter_next_filter(&iter.iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -2051,7 +2052,7 @@ int bch_btree_check(struct cache_set *c)
int ret = 0;
int i;
struct bkey *k = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct btree_check_state check_state;
/* check and mark root node keys */
@@ -2547,11 +2548,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
if (b->level) {
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- bch_btree_iter_init(&b->keys, &iter, from);
+ bch_btree_iter_stack_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
+ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad))) {
ret = bcache_btree(map_nodes_recurse, k, b,
op, from, fn, flags);
@@ -2580,11 +2581,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
{
int ret = MAP_CONTINUE;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- bch_btree_iter_init(&b->keys, &iter, from);
+ bch_btree_iter_stack_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
+ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad))) {
ret = !b->level
? fn(op, b, k)
: bcache_btree(map_keys_recurse, k,
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 330bcd9ea4a9..cba09660148a 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -881,8 +881,8 @@ static void bcache_device_free(struct bcache_device *d)
bcache_device_detach(d);
if (disk) {
- ida_simple_remove(&bcache_device_idx,
- first_minor_to_idx(disk->first_minor));
+ ida_free(&bcache_device_idx,
+ first_minor_to_idx(disk->first_minor));
put_disk(disk);
}
@@ -940,8 +940,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
if (!d->full_dirty_stripes)
goto out_free_stripe_sectors_dirty;
- idx = ida_simple_get(&bcache_device_idx, 0,
- BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
+ idx = ida_alloc_max(&bcache_device_idx, BCACHE_DEVICE_IDX_MAX - 1,
+ GFP_KERNEL);
if (idx < 0)
goto out_free_full_dirty_stripes;
@@ -986,7 +986,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
out_bioset_exit:
bioset_exit(&d->bio_split);
out_ida_remove:
- ida_simple_remove(&bcache_device_idx, idx);
+ ida_free(&bcache_device_idx, idx);
out_free_full_dirty_stripes:
kvfree(d->full_dirty_stripes);
out_free_stripe_sectors_dirty:
@@ -1914,8 +1914,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
INIT_LIST_HEAD(&c->btree_cache_freed);
INIT_LIST_HEAD(&c->data_buckets);
- iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
- sizeof(struct btree_iter_set);
+ iter_size = sizeof(struct btree_iter) +
+ ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
+ sizeof(struct btree_iter_set);
c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
if (!c->devices)
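Two independent cleanups meet in this file: the iter_size computation now accounts for the struct btree_iter header ahead of the flexible data[] array, and the deprecated ida_simple_*() calls become ida_alloc_max()/ida_free(). Note the off-by-one convention: ida_simple_get() took an exclusive end, while ida_alloc_max() takes an inclusive max, hence BCACHE_DEVICE_IDX_MAX - 1. A small sketch of the conversion (example_ida and nr_ids are illustrative; nr_ids is assumed >= 1):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_idx(unsigned int nr_ids)
{
	/* old: ida_simple_get(&example_ida, 0, nr_ids, GFP_KERNEL) */
	return ida_alloc_max(&example_ida, nr_ids - 1, GFP_KERNEL);
}

static void example_put_idx(int idx)
{
	ida_free(&example_ida, idx);	/* replaces ida_simple_remove() */
}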
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 6956beb55326..826b14cae4e5 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -660,7 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c)
unsigned int bytes = 0;
struct bkey *k;
struct btree *b;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
goto lock_root;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 8827a6f130ad..792e070ccf38 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -908,15 +908,15 @@ static int bch_dirty_init_thread(void *arg)
struct dirty_init_thrd_info *info = arg;
struct bch_dirty_init_state *state = info->state;
struct cache_set *c = state->c;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
k = p = NULL;
prev_idx = 0;
- bch_btree_iter_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -930,7 +930,7 @@ static int bch_dirty_init_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter,
+ k = bch_btree_iter_next_filter(&iter.iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -979,7 +979,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
int i;
struct btree *b = NULL;
struct bkey *k = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
struct bch_dirty_init_state state;
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index fd852981ef9c..cf433b0cf742 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -321,8 +321,7 @@ static bool __unlock(struct dm_bio_prison_v2 *prison,
{
BUG_ON(!cell->exclusive_lock);
- bio_list_merge(bios, &cell->bios);
- bio_list_init(&cell->bios);
+ bio_list_merge_init(bios, &cell->bios);
if (cell->shared_count) {
cell->exclusive_lock = false;
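This and the following dm hunks all replace the same two-call idiom with bio_list_merge_init(). A sketch of the helper's semantics (the real definition lives in include/linux/bio.h; the _sketch suffix avoids clashing with it):

static inline void bio_list_merge_init_sketch(struct bio_list *dst,
					      struct bio_list *src)
{
	bio_list_merge(dst, src);	/* splice src's bios onto dst */
	bio_list_init(src);		/* leave src empty */
}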
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 911f73f7ebba..0fcbf8603846 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -115,8 +115,7 @@ static void __commit(struct work_struct *_ws)
*/
spin_lock_irq(&b->lock);
list_splice_init(&b->work_items, &work_items);
- bio_list_merge(&bios, &b->bios);
- bio_list_init(&b->bios);
+ bio_list_merge_init(&bios, &b->bios);
b->commit_scheduled = false;
spin_unlock_irq(&b->lock);
@@ -565,8 +564,7 @@ static void defer_bio(struct cache *cache, struct bio *bio)
static void defer_bios(struct cache *cache, struct bio_list *bios)
{
spin_lock_irq(&cache->lock);
- bio_list_merge(&cache->deferred_bios, bios);
- bio_list_init(bios);
+ bio_list_merge_init(&cache->deferred_bios, bios);
spin_unlock_irq(&cache->lock);
wake_deferred_bio_worker(cache);
@@ -1816,8 +1814,7 @@ static void process_deferred_bios(struct work_struct *ws)
bio_list_init(&bios);
spin_lock_irq(&cache->lock);
- bio_list_merge(&bios, &cache->deferred_bios);
- bio_list_init(&cache->deferred_bios);
+ bio_list_merge_init(&bios, &cache->deferred_bios);
spin_unlock_irq(&cache->lock);
while ((bio = bio_list_pop(&bios))) {
@@ -1847,8 +1844,7 @@ static void requeue_deferred_bios(struct cache *cache)
struct bio_list bios;
bio_list_init(&bios);
- bio_list_merge(&bios, &cache->deferred_bios);
- bio_list_init(&cache->deferred_bios);
+ bio_list_merge_init(&bios, &cache->deferred_bios);
while ((bio = bio_list_pop(&bios))) {
bio->bi_status = BLK_STS_DM_REQUEUE;
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 94b2fc33f64b..3f68672ab7c9 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1181,8 +1181,7 @@ static void process_deferred_discards(struct clone *clone)
struct bio_list discards = BIO_EMPTY_LIST;
spin_lock_irq(&clone->lock);
- bio_list_merge(&discards, &clone->deferred_discard_bios);
- bio_list_init(&clone->deferred_discard_bios);
+ bio_list_merge_init(&discards, &clone->deferred_discard_bios);
spin_unlock_irq(&clone->lock);
if (bio_list_empty(&discards))
@@ -1215,8 +1214,7 @@ static void process_deferred_bios(struct clone *clone)
struct bio_list bios = BIO_EMPTY_LIST;
spin_lock_irq(&clone->lock);
- bio_list_merge(&bios, &clone->deferred_bios);
- bio_list_init(&clone->deferred_bios);
+ bio_list_merge_init(&bios, &clone->deferred_bios);
spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios))
@@ -1237,11 +1235,9 @@ static void process_deferred_flush_bios(struct clone *clone)
* before issuing them or signaling their completion.
*/
spin_lock_irq(&clone->lock);
- bio_list_merge(&bios, &clone->deferred_flush_bios);
- bio_list_init(&clone->deferred_flush_bios);
-
- bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
- bio_list_init(&clone->deferred_flush_completions);
+ bio_list_merge_init(&bios, &clone->deferred_flush_bios);
+ bio_list_merge_init(&bio_completions,
+ &clone->deferred_flush_completions);
spin_unlock_irq(&clone->lock);
if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index e6757a30dcca..08700bfc3e23 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -140,7 +140,7 @@ struct mapped_device {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_zones;
- unsigned int *zwp_offset;
+ void *zone_revalidate_map;
#endif
#ifdef CONFIG_IMA
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 6acfa5bf97a4..8f81e597858d 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1272,8 +1272,7 @@ static void process_deferred_bios(struct era *era)
bio_list_init(&marked_bios);
spin_lock(&era->deferred_lock);
- bio_list_merge(&deferred_bios, &era->deferred_bios);
- bio_list_init(&era->deferred_bios);
+ bio_list_merge_init(&deferred_bios, &era->deferred_bios);
spin_unlock(&era->deferred_lock);
if (bio_list_empty(&deferred_bios))
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 05d1328d1811..15b681b90153 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -704,8 +704,7 @@ static void process_queued_bios(struct work_struct *work)
return;
}
- bio_list_merge(&bios, &m->queued_bios);
- bio_list_init(&m->queued_bios);
+ bio_list_merge_init(&bios, &m->queued_bios);
spin_unlock_irqrestore(&m->lock, flags);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 41f1d731ae5a..2c6fbd87363f 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -2042,7 +2042,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
r = dm_set_zones_restrictions(t, q);
if (r)
return r;
- if (!static_key_enabled(&zoned_enabled.key))
+ if (blk_queue_is_zoned(q) &&
+ !static_key_enabled(&zoned_enabled.key))
static_branch_enable(&zoned_enabled);
}
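The added blk_queue_is_zoned() check matters because dm_set_zones_restrictions() can now demote a device with only conventional zones to a regular queue; the zoned fast-path key must not be flipped for such tables. A sketch of the guard in isolation (q and the zoned_enabled static key as declared in dm.c):

static void enable_zoned_key_sketch(struct request_queue *q)
{
	/* enable the fast path once, and only for queues that stayed zoned */
	if (blk_queue_is_zoned(q) && !static_key_enabled(&zoned_enabled.key))
		static_branch_enable(&zoned_enabled);
}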
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4793ad2aa1f7..f359984c8ef2 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -592,12 +592,6 @@ struct dm_thin_endio_hook {
struct dm_bio_prison_cell *cell;
};
-static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
-{
- bio_list_merge(bios, master);
- bio_list_init(master);
-}
-
static void error_bio_list(struct bio_list *bios, blk_status_t error)
{
struct bio *bio;
@@ -616,7 +610,7 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
bio_list_init(&bios);
spin_lock_irq(&tc->lock);
- __merge_bio_list(&bios, master);
+ bio_list_merge_init(&bios, master);
spin_unlock_irq(&tc->lock);
error_bio_list(&bios, error);
@@ -645,8 +639,8 @@ static void requeue_io(struct thin_c *tc)
bio_list_init(&bios);
spin_lock_irq(&tc->lock);
- __merge_bio_list(&bios, &tc->deferred_bio_list);
- __merge_bio_list(&bios, &tc->retry_on_resume_list);
+ bio_list_merge_init(&bios, &tc->deferred_bio_list);
+ bio_list_merge_init(&bios, &tc->retry_on_resume_list);
spin_unlock_irq(&tc->lock);
error_bio_list(&bios, BLK_STS_DM_REQUEUE);
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
index 94f6f1ccfb7d..ab3ea8337809 100644
--- a/drivers/md/dm-vdo/data-vio.c
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -604,8 +604,7 @@ static void assign_discard_permit(struct limiter *limiter)
static void get_waiters(struct limiter *limiter)
{
- bio_list_merge(&limiter->waiters, &limiter->new_waiters);
- bio_list_init(&limiter->new_waiters);
+ bio_list_merge_init(&limiter->waiters, &limiter->new_waiters);
}
static inline struct data_vio *get_available_data_vio(struct data_vio_pool *pool)
diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c
index 57e87f0d7069..dd4fdee2ca0c 100644
--- a/drivers/md/dm-vdo/flush.c
+++ b/drivers/md/dm-vdo/flush.c
@@ -369,8 +369,7 @@ void vdo_dump_flusher(const struct flusher *flusher)
static void initialize_flush(struct vdo_flush *flush, struct vdo *vdo)
{
bio_list_init(&flush->bios);
- bio_list_merge(&flush->bios, &vdo->flusher->waiting_flush_bios);
- bio_list_init(&vdo->flusher->waiting_flush_bios);
+ bio_list_merge_init(&flush->bios, &vdo->flusher->waiting_flush_bios);
}
static void launch_flush(struct vdo_flush *flush)
diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
index 01d2743444ec..3a989efae142 100644
--- a/drivers/md/dm-vdo/murmurhash3.c
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -137,7 +137,7 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
break;
default:
break;
- };
+ }
}
/* finalization */
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index eb9832b22b14..8e6bcb0d786a 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -60,16 +60,23 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
struct dm_table *map;
int srcu_idx, ret;
- if (dm_suspended_md(md))
- return -EAGAIN;
+ if (!md->zone_revalidate_map) {
+ /* Regular user context */
+ if (dm_suspended_md(md))
+ return -EAGAIN;
- map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- return -EIO;
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!map)
+ return -EIO;
+ } else {
+ /* Zone revalidation during __bind() */
+ map = md->zone_revalidate_map;
+ }
ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data);
- dm_put_live_table(md, srcu_idx);
+ if (!md->zone_revalidate_map)
+ dm_put_live_table(md, srcu_idx);
return ret;
}
@@ -138,80 +145,47 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
}
}
-void dm_cleanup_zoned_dev(struct mapped_device *md)
+/*
+ * Count conventional zones of a mapped zoned device. If the device
+ * only has conventional zones, do not expose it as zoned.
+ */
+static int dm_check_zoned_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
{
- if (md->disk) {
- bitmap_free(md->disk->conv_zones_bitmap);
- md->disk->conv_zones_bitmap = NULL;
- bitmap_free(md->disk->seq_zones_wlock);
- md->disk->seq_zones_wlock = NULL;
- }
+ unsigned int *nr_conv_zones = data;
- kvfree(md->zwp_offset);
- md->zwp_offset = NULL;
- md->nr_zones = 0;
-}
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ (*nr_conv_zones)++;
-static unsigned int dm_get_zone_wp_offset(struct blk_zone *zone)
-{
- switch (zone->cond) {
- case BLK_ZONE_COND_IMP_OPEN:
- case BLK_ZONE_COND_EXP_OPEN:
- case BLK_ZONE_COND_CLOSED:
- return zone->wp - zone->start;
- case BLK_ZONE_COND_FULL:
- return zone->len;
- case BLK_ZONE_COND_EMPTY:
- case BLK_ZONE_COND_NOT_WP:
- case BLK_ZONE_COND_OFFLINE:
- case BLK_ZONE_COND_READONLY:
- default:
- /*
- * Conventional, offline and read-only zones do not have a valid
- * write pointer. Use 0 as for an empty zone.
- */
- return 0;
- }
+ return 0;
}
-static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
- void *data)
+static int dm_check_zoned(struct mapped_device *md, struct dm_table *t)
{
- struct mapped_device *md = data;
struct gendisk *disk = md->disk;
+ unsigned int nr_conv_zones = 0;
+ int ret;
- switch (zone->type) {
- case BLK_ZONE_TYPE_CONVENTIONAL:
- if (!disk->conv_zones_bitmap) {
- disk->conv_zones_bitmap = bitmap_zalloc(disk->nr_zones,
- GFP_NOIO);
- if (!disk->conv_zones_bitmap)
- return -ENOMEM;
- }
- set_bit(idx, disk->conv_zones_bitmap);
- break;
- case BLK_ZONE_TYPE_SEQWRITE_REQ:
- case BLK_ZONE_TYPE_SEQWRITE_PREF:
- if (!disk->seq_zones_wlock) {
- disk->seq_zones_wlock = bitmap_zalloc(disk->nr_zones,
- GFP_NOIO);
- if (!disk->seq_zones_wlock)
- return -ENOMEM;
- }
- if (!md->zwp_offset) {
- md->zwp_offset =
- kvcalloc(disk->nr_zones, sizeof(unsigned int),
- GFP_KERNEL);
- if (!md->zwp_offset)
- return -ENOMEM;
- }
- md->zwp_offset[idx] = dm_get_zone_wp_offset(zone);
-
- break;
- default:
- DMERR("Invalid zone type 0x%x at sectors %llu",
- (int)zone->type, zone->start);
- return -ENODEV;
+ /* Count conventional zones */
+ md->zone_revalidate_map = t;
+ ret = dm_blk_report_zones(disk, 0, UINT_MAX,
+ dm_check_zoned_cb, &nr_conv_zones);
+ md->zone_revalidate_map = NULL;
+ if (ret < 0) {
+ DMERR("Check zoned failed %d", ret);
+ return ret;
+ }
+
+ /*
+ * If we only have conventional zones, expose the mapped device as
+ * a regular device.
+ */
+ if (nr_conv_zones >= ret) {
+ disk->queue->limits.max_open_zones = 0;
+ disk->queue->limits.max_active_zones = 0;
+ disk->queue->limits.zoned = false;
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ disk->nr_zones = 0;
}
return 0;
@@ -226,41 +200,32 @@ static int dm_zone_revalidate_cb(struct blk_zone *zone, unsigned int idx,
static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t)
{
struct gendisk *disk = md->disk;
- unsigned int noio_flag;
int ret;
- /*
- * Check if something changed. If yes, cleanup the current resources
- * and reallocate everything.
- */
+ /* Revalidate only if something changed. */
if (!disk->nr_zones || disk->nr_zones != md->nr_zones)
- dm_cleanup_zoned_dev(md);
+ md->nr_zones = 0;
+
if (md->nr_zones)
return 0;
/*
- * Scan all zones to initialize everything. Ensure that all vmalloc
- * operations in this context are done as if GFP_NOIO was specified.
+ * Our table is not live yet. So the call to dm_get_live_table()
+ * in dm_blk_report_zones() will fail. Set a temporary pointer to
+ * our table for dm_blk_report_zones() to use directly.
*/
- noio_flag = memalloc_noio_save();
- ret = dm_blk_do_report_zones(md, t, 0, disk->nr_zones,
- dm_zone_revalidate_cb, md);
- memalloc_noio_restore(noio_flag);
- if (ret < 0)
- goto err;
- if (ret != disk->nr_zones) {
- ret = -EIO;
- goto err;
+ md->zone_revalidate_map = t;
+ ret = blk_revalidate_disk_zones(disk);
+ md->zone_revalidate_map = NULL;
+
+ if (ret) {
+ DMERR("Revalidate zones failed %d", ret);
+ return ret;
}
md->nr_zones = disk->nr_zones;
return 0;
-
-err:
- DMERR("Revalidate zones failed %d", ret);
- dm_cleanup_zoned_dev(md);
- return ret;
}
static int device_not_zone_append_capable(struct dm_target *ti,
@@ -289,294 +254,40 @@ static bool dm_table_supports_zone_append(struct dm_table *t)
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
{
struct mapped_device *md = t->md;
+ int ret;
/*
- * For a zoned target, the number of zones should be updated for the
- * correct value to be exposed in sysfs queue/nr_zones.
+ * Check if zone append is natively supported, and if not, set the
+ * mapped device queue as needing zone append emulation.
*/
WARN_ON_ONCE(queue_is_mq(q));
- md->disk->nr_zones = bdev_nr_zones(md->disk->part0);
-
- /* Check if zone append is natively supported */
if (dm_table_supports_zone_append(t)) {
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- dm_cleanup_zoned_dev(md);
- return 0;
+ } else {
+ set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ blk_queue_max_zone_append_sectors(q, 0);
}
- /*
- * Mark the mapped device as needing zone append emulation and
- * initialize the emulation resources once the capacity is set.
- */
- set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
if (!get_capacity(md->disk))
return 0;
- return dm_revalidate_zones(md, t);
-}
-
-static int dm_update_zone_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
- void *data)
-{
- unsigned int *wp_offset = data;
-
- *wp_offset = dm_get_zone_wp_offset(zone);
-
- return 0;
-}
-
-static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
- unsigned int *wp_ofst)
-{
- sector_t sector = zno * bdev_zone_sectors(md->disk->part0);
- unsigned int noio_flag;
- struct dm_table *t;
- int srcu_idx, ret;
-
- t = dm_get_live_table(md, &srcu_idx);
- if (!t)
- return -EIO;
-
- /*
- * Ensure that all memory allocations in this context are done as if
- * GFP_NOIO was specified.
- */
- noio_flag = memalloc_noio_save();
- ret = dm_blk_do_report_zones(md, t, sector, 1,
- dm_update_zone_wp_offset_cb, wp_ofst);
- memalloc_noio_restore(noio_flag);
-
- dm_put_live_table(md, srcu_idx);
-
- if (ret != 1)
- return -EIO;
-
- return 0;
-}
-
-struct orig_bio_details {
- enum req_op op;
- unsigned int nr_sectors;
-};
-
-/*
- * First phase of BIO mapping for targets with zone append emulation:
- * check all BIO that change a zone writer pointer and change zone
- * append operations into regular write operations.
- */
-static bool dm_zone_map_bio_begin(struct mapped_device *md,
- unsigned int zno, struct bio *clone)
-{
- sector_t zsectors = bdev_zone_sectors(md->disk->part0);
- unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
-
- /*
- * If the target zone is in an error state, recover by inspecting the
- * zone to get its current write pointer position. Note that since the
- * target zone is already locked, a BIO issuing context should never
- * see the zone write in the DM_ZONE_UPDATING_WP_OFST state.
- */
- if (zwp_offset == DM_ZONE_INVALID_WP_OFST) {
- if (dm_update_zone_wp_offset(md, zno, &zwp_offset))
- return false;
- WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
- }
-
- switch (bio_op(clone)) {
- case REQ_OP_ZONE_RESET:
- case REQ_OP_ZONE_FINISH:
- return true;
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE:
- /* Writes must be aligned to the zone write pointer */
- if ((clone->bi_iter.bi_sector & (zsectors - 1)) != zwp_offset)
- return false;
- break;
- case REQ_OP_ZONE_APPEND:
- /*
- * Change zone append operations into a non-mergeable regular
- * writes directed at the current write pointer position of the
- * target zone.
- */
- clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
- (clone->bi_opf & (~REQ_OP_MASK));
- clone->bi_iter.bi_sector += zwp_offset;
- break;
- default:
- DMWARN_LIMIT("Invalid BIO operation");
- return false;
- }
-
- /* Cannot write to a full zone */
- if (zwp_offset >= zsectors)
- return false;
-
- return true;
-}
-
-/*
- * Second phase of BIO mapping for targets with zone append emulation:
- * update the zone write pointer offset array to account for the additional
- * data written to a zone. Note that at this point, the remapped clone BIO
- * may already have completed, so we do not touch it.
- */
-static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
- struct orig_bio_details *orig_bio_details,
- unsigned int nr_sectors)
-{
- unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
-
- /* The clone BIO may already have been completed and failed */
- if (zwp_offset == DM_ZONE_INVALID_WP_OFST)
- return BLK_STS_IOERR;
-
- /* Update the zone wp offset */
- switch (orig_bio_details->op) {
- case REQ_OP_ZONE_RESET:
- WRITE_ONCE(md->zwp_offset[zno], 0);
- return BLK_STS_OK;
- case REQ_OP_ZONE_FINISH:
- WRITE_ONCE(md->zwp_offset[zno],
- bdev_zone_sectors(md->disk->part0));
- return BLK_STS_OK;
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE:
- WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
- return BLK_STS_OK;
- case REQ_OP_ZONE_APPEND:
- /*
- * Check that the target did not truncate the write operation
- * emulating a zone append.
- */
- if (nr_sectors != orig_bio_details->nr_sectors) {
- DMWARN_LIMIT("Truncated write for zone append");
- return BLK_STS_IOERR;
- }
- WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
- return BLK_STS_OK;
- default:
- DMWARN_LIMIT("Invalid BIO operation");
- return BLK_STS_IOERR;
- }
-}
-
-static inline void dm_zone_lock(struct gendisk *disk, unsigned int zno,
- struct bio *clone)
-{
- if (WARN_ON_ONCE(bio_flagged(clone, BIO_ZONE_WRITE_LOCKED)))
- return;
-
- wait_on_bit_lock_io(disk->seq_zones_wlock, zno, TASK_UNINTERRUPTIBLE);
- bio_set_flag(clone, BIO_ZONE_WRITE_LOCKED);
-}
-
-static inline void dm_zone_unlock(struct gendisk *disk, unsigned int zno,
- struct bio *clone)
-{
- if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
- return;
-
- WARN_ON_ONCE(!test_bit(zno, disk->seq_zones_wlock));
- clear_bit_unlock(zno, disk->seq_zones_wlock);
- smp_mb__after_atomic();
- wake_up_bit(disk->seq_zones_wlock, zno);
-
- bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
-}
-
-static bool dm_need_zone_wp_tracking(struct bio *bio)
-{
/*
- * Special processing is not needed for operations that do not need the
- * zone write lock, that is, all operations that target conventional
- * zones and all operations that do not modify directly a sequential
- * zone write pointer.
+ * Check that the mapped device will indeed be zoned, that is, that it
+ * has sequential write required zones.
*/
- if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
- return false;
- switch (bio_op(bio)) {
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE:
- case REQ_OP_ZONE_RESET:
- case REQ_OP_ZONE_FINISH:
- case REQ_OP_ZONE_APPEND:
- return bio_zone_is_seq(bio);
- default:
- return false;
- }
-}
-
-/*
- * Special IO mapping for targets needing zone append emulation.
- */
-int dm_zone_map_bio(struct dm_target_io *tio)
-{
- struct dm_io *io = tio->io;
- struct dm_target *ti = tio->ti;
- struct mapped_device *md = io->md;
- struct bio *clone = &tio->clone;
- struct orig_bio_details orig_bio_details;
- unsigned int zno;
- blk_status_t sts;
- int r;
-
- /*
- * IOs that do not change a zone write pointer do not need
- * any additional special processing.
- */
- if (!dm_need_zone_wp_tracking(clone))
- return ti->type->map(ti, clone);
-
- /* Lock the target zone */
- zno = bio_zone_no(clone);
- dm_zone_lock(md->disk, zno, clone);
-
- orig_bio_details.nr_sectors = bio_sectors(clone);
- orig_bio_details.op = bio_op(clone);
+ ret = dm_check_zoned(md, t);
+ if (ret)
+ return ret;
+ if (!blk_queue_is_zoned(q))
+ return 0;
- /*
- * Check that the bio and the target zone write pointer offset are
- * both valid, and if the bio is a zone append, remap it to a write.
- */
- if (!dm_zone_map_bio_begin(md, zno, clone)) {
- dm_zone_unlock(md->disk, zno, clone);
- return DM_MAPIO_KILL;
+ if (!md->disk->nr_zones) {
+ DMINFO("%s using %s zone append",
+ md->disk->disk_name,
+ queue_emulates_zone_append(q) ? "emulated" : "native");
}
- /* Let the target do its work */
- r = ti->type->map(ti, clone);
- switch (r) {
- case DM_MAPIO_SUBMITTED:
- /*
- * The target submitted the clone BIO. The target zone will
- * be unlocked on completion of the clone.
- */
- sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
- *tio->len_ptr);
- break;
- case DM_MAPIO_REMAPPED:
- /*
- * The target only remapped the clone BIO. In case of error,
- * unlock the target zone here as the clone will not be
- * submitted.
- */
- sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
- *tio->len_ptr);
- if (sts != BLK_STS_OK)
- dm_zone_unlock(md->disk, zno, clone);
- break;
- case DM_MAPIO_REQUEUE:
- case DM_MAPIO_KILL:
- default:
- dm_zone_unlock(md->disk, zno, clone);
- sts = BLK_STS_IOERR;
- break;
- }
-
- if (sts != BLK_STS_OK)
- return DM_MAPIO_KILL;
-
- return r;
+ return dm_revalidate_zones(md, t);
}
/*
@@ -587,61 +298,17 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
struct mapped_device *md = io->md;
struct gendisk *disk = md->disk;
struct bio *orig_bio = io->orig_bio;
- unsigned int zwp_offset;
- unsigned int zno;
/*
- * For targets that do not emulate zone append, we only need to
- * handle native zone-append bios.
+ * Get the offset within the zone of the written sector
+ * and add that to the original bio sector position.
*/
- if (!dm_emulate_zone_append(md)) {
- /*
- * Get the offset within the zone of the written sector
- * and add that to the original bio sector position.
- */
- if (clone->bi_status == BLK_STS_OK &&
- bio_op(clone) == REQ_OP_ZONE_APPEND) {
- sector_t mask =
- (sector_t)bdev_zone_sectors(disk->part0) - 1;
-
- orig_bio->bi_iter.bi_sector +=
- clone->bi_iter.bi_sector & mask;
- }
-
- return;
- }
+ if (clone->bi_status == BLK_STS_OK &&
+ bio_op(clone) == REQ_OP_ZONE_APPEND) {
+ sector_t mask = bdev_zone_sectors(disk->part0) - 1;
- /*
- * For targets that do emulate zone append, if the clone BIO does not
- * own the target zone write lock, we have nothing to do.
- */
- if (!bio_flagged(clone, BIO_ZONE_WRITE_LOCKED))
- return;
-
- zno = bio_zone_no(orig_bio);
-
- if (clone->bi_status != BLK_STS_OK) {
- /*
- * BIOs that modify a zone write pointer may leave the zone
- * in an unknown state in case of failure (e.g. the write
- * pointer was only partially advanced). In this case, set
- * the target zone write pointer as invalid unless it is
- * already being updated.
- */
- WRITE_ONCE(md->zwp_offset[zno], DM_ZONE_INVALID_WP_OFST);
- } else if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
- /*
- * Get the written sector for zone append operation that were
- * emulated using regular write operations.
- */
- zwp_offset = READ_ONCE(md->zwp_offset[zno]);
- if (WARN_ON_ONCE(zwp_offset < bio_sectors(orig_bio)))
- WRITE_ONCE(md->zwp_offset[zno],
- DM_ZONE_INVALID_WP_OFST);
- else
- orig_bio->bi_iter.bi_sector +=
- zwp_offset - bio_sectors(orig_bio);
+ orig_bio->bi_iter.bi_sector += clone->bi_iter.bi_sector & mask;
}
- dm_zone_unlock(disk, zno, clone);
+ return;
}
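The thread running through this dm-zone.c rework: per-zone write-pointer tracking and zone write locking move into the block layer, so device-mapper only needs (a) a way to report zones before the table is live and (b) the sector fix-up for native zone append completions. The revalidation trick is compact enough to restate as a sketch (mirrors dm_revalidate_zones() above):

static int revalidate_zones_sketch(struct mapped_device *md,
				   struct dm_table *t)
{
	int ret;

	/*
	 * t is not the live table yet, so dm_get_live_table() would fail;
	 * park t where dm_blk_report_zones() looks first.
	 */
	md->zone_revalidate_map = t;
	ret = blk_revalidate_disk_zones(md->disk);
	md->zone_revalidate_map = NULL;

	return ret;
}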
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 56aa2a8b9d71..597dd7a25823 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -765,7 +765,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
return td;
out_blkdev_put:
- fput(bdev_file);
+ __fput_sync(bdev_file);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -778,7 +778,13 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- fput(td->dm_dev.bdev_file);
+
+	/* Leverage async fput() if DMF_DEFERRED_REMOVE is set */
+ if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+ fput(td->dm_dev.bdev_file);
+ else
+ __fput_sync(td->dm_dev.bdev_file);
+
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
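open_table_device()/close_table_device() switch to __fput_sync() so the underlying block device is actually released by the time the table device goes away; plain fput() defers the final release to task_work and is kept only for the deferred-remove case. A sketch of the distinction (deferred corresponds to the DMF_DEFERRED_REMOVE test above):

static void close_bdev_file_sketch(struct file *bdev_file, bool deferred)
{
	if (deferred)
		fput(bdev_file);	/* async: final release via task_work */
	else
		__fput_sync(bdev_file);	/* sync: released before returning */
}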
@@ -1422,25 +1428,12 @@ static void __map_bio(struct bio *clone)
down(&md->swap_bios_semaphore);
}
- if (static_branch_unlikely(&zoned_enabled)) {
- /*
- * Check if the IO needs a special mapping due to zone append
- * emulation on zoned target. In this case, dm_zone_map_bio()
- * calls the target map operation.
- */
- if (unlikely(dm_emulate_zone_append(md)))
- r = dm_zone_map_bio(tio);
- else
- goto do_map;
- } else {
-do_map:
- if (likely(ti->type->map == linear_map))
- r = linear_map(ti, clone);
- else if (ti->type->map == stripe_map)
- r = stripe_map(ti, clone);
- else
- r = ti->type->map(ti, clone);
- }
+ if (likely(ti->type->map == linear_map))
+ r = linear_map(ti, clone);
+ else if (ti->type->map == stripe_map)
+ r = stripe_map(ti, clone);
+ else
+ r = ti->type->map(ti, clone);
switch (r) {
case DM_MAPIO_SUBMITTED:
@@ -1768,6 +1761,33 @@ static void init_clone_info(struct clone_info *ci, struct dm_io *io,
ci->sector_count = 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
+ struct bio *bio)
+{
+ /*
+	 * For mapped devices that need zone append emulation, we must
+ * split any large BIO that straddles zone boundaries.
+ */
+ return dm_emulate_zone_append(md) && bio_straddles_zones(bio) &&
+ !bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
+}
+static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+{
+ return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0);
+}
+#else
+static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
+ struct bio *bio)
+{
+ return false;
+}
+static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
+{
+ return false;
+}
+#endif
+
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
@@ -1777,19 +1797,32 @@ static void dm_split_and_process_bio(struct mapped_device *md,
struct clone_info ci;
struct dm_io *io;
blk_status_t error = BLK_STS_OK;
- bool is_abnormal;
+ bool is_abnormal, need_split;
- is_abnormal = is_abnormal_io(bio);
- if (unlikely(is_abnormal)) {
+ need_split = is_abnormal = is_abnormal_io(bio);
+ if (static_branch_unlikely(&zoned_enabled))
+ need_split = is_abnormal || dm_zone_bio_needs_split(md, bio);
+
+ if (unlikely(need_split)) {
/*
* Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
* otherwise associated queue_limits won't be imposed.
+ * Also split the BIO for mapped devices needing zone append
+ * emulation to ensure that the BIO does not cross zone
+ * boundaries.
*/
bio = bio_split_to_limits(bio);
if (!bio)
return;
}
+ /*
+ * Use the block layer zone write plugging for mapped devices that
+ * need zone append emulation (e.g. dm-crypt).
+ */
+ if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio))
+ return;
+
/* Only support nowait for normal IO */
if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
io = alloc_io(md, bio, GFP_NOWAIT);
@@ -2010,7 +2043,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
md->dax_dev = NULL;
}
- dm_cleanup_zoned_dev(md);
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 7f1acbf6bd9e..e0c57f19839b 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -104,13 +104,11 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
-void dm_cleanup_zoned_dev(struct mapped_device *md);
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_map_bio(struct dm_target_io *io);
#else
-static inline void dm_cleanup_zoned_dev(struct mapped_device *md) {}
#define dm_blk_report_zones NULL
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 059afc24c08b..0a2d37eb38ef 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1424,7 +1424,7 @@ __acquires(bitmap->lock)
sector_t chunk = offset >> bitmap->chunkshift;
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
- sector_t csize;
+ sector_t csize = ((sector_t)1) << bitmap->chunkshift;
int err;
if (page >= bitmap->pages) {
@@ -1433,6 +1433,7 @@ __acquires(bitmap->lock)
* End-of-device while looking for a whole page or
* user set a huge number to sysfs bitmap_set_bits.
*/
+ *blocks = csize - (offset & (csize - 1));
return NULL;
}
err = md_bitmap_checkpage(bitmap, page, create, 0);
@@ -1441,8 +1442,7 @@ __acquires(bitmap->lock)
bitmap->bp[page].map == NULL)
csize = ((sector_t)1) << (bitmap->chunkshift +
PAGE_COUNTER_SHIFT);
- else
- csize = ((sector_t)1) << bitmap->chunkshift;
+
*blocks = csize - (offset & (csize - 1));
if (err < 0)
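Hoisting the csize initialisation lets the early-return path still report how many blocks the (missing) page would have covered. A worked instance of the arithmetic (chunkshift value illustrative):

static sector_t blocks_left_in_chunk(sector_t offset,
				     unsigned long chunkshift)
{
	sector_t csize = (sector_t)1 << chunkshift;

	/* chunkshift = 9: csize = 512; offset = 520 -> 512 - 8 = 504 */
	return csize - (offset & (csize - 1));
}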
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e575e74aabf5..aff9118ff697 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8087,7 +8087,8 @@ void md_wakeup_thread(struct md_thread __rcu *thread)
if (t) {
pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
set_bit(THREAD_WAKEUP, &t->flags);
- wake_up(&t->wqueue);
+ if (wq_has_sleeper(&t->wqueue))
+ wake_up(&t->wqueue);
}
rcu_read_unlock();
}
@@ -8582,6 +8583,10 @@ static int is_mddev_idle(struct mddev *mddev, int init)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_disk;
+
+ if (!init && !blk_queue_io_stat(disk->queue))
+ continue;
+
curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
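md_wakeup_thread() runs on hot paths, so the patch skips the wake_up() when nobody is sleeping. wq_has_sleeper() embeds the smp_mb() that pairs with the barrier on the waiter's side, so a condition published just before it cannot be missed. Sketch of the pattern with an illustrative flag word:

static void wake_worker_sketch(struct wait_queue_head *wq,
			       unsigned long *flags)
{
	set_bit(0, flags);		/* publish the condition first */
	if (wq_has_sleeper(wq))		/* implies smp_mb(); pairs with waiter */
		wake_up(wq);
}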
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 097d9dbd69b8..ca085ecad504 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -621,7 +621,8 @@ extern void mddev_unlock(struct mddev *mddev);
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
- atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
+ if (blk_queue_io_stat(bdev->bd_disk->queue))
+ atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
}
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index be8ac24f50b6..7b8a71ca66dd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1558,7 +1558,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
- free_r1bio(r1_bio);
+ mempool_free(r1_bio, &conf->r1bio_pool);
allow_barrier(conf, bio->bi_iter.bi_sector);
if (bio->bi_opf & REQ_NOWAIT) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d874abfc1836..2bd1ce9b3922 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,7 +36,6 @@
*/
#include <linux/blkdev.h>
-#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -6734,6 +6733,9 @@ static void raid5d(struct md_thread *thread)
int batch_size, released;
unsigned int offset;
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+ break;
+
released = release_stripe_list(conf, conf->temp_inactive_list);
if (released)
clear_bit(R5_DID_ALLOC, &conf->cache_state);
@@ -6770,18 +6772,7 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
-
- /*
- * Waiting on MD_SB_CHANGE_PENDING below may deadlock
- * seeing md_check_recovery() is needed to clear
- * the flag when using mdmon.
- */
- continue;
}
-
- wait_event_lock_irq(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
- conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
index 4c34344dc7dc..d7027d600208 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
@@ -50,12 +50,12 @@ static void mtk_vcodec_vpu_reset_dec_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
@@ -65,12 +65,12 @@ static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
index f47c98faf068..2073781ccadb 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
@@ -268,7 +268,9 @@ static int fops_vcodec_open(struct file *file)
ctx->dev->vdec_pdata->init_vdec_params(ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mtk_vcodec_dbgfs_create(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -311,7 +313,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
mtk_vcodec_dbgfs_remove(dev, ctx->id);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -404,6 +408,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
for (i = 0; i < MTK_VDEC_HW_MAX; i++)
mutex_init(&dev->dec_mutex[i]);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
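The new dev_ctx_lock exists so ctx_list can be walked (see the vpu_dec_check_ap_inst()/vpu_enc_check_ap_inst() lookups below) without taking the much broader dev_mutex, closing a race against open/release. The pattern, reduced to a sketch:

static void ctx_list_add_sketch(struct mtk_vcodec_dec_dev *dev,
				struct mtk_vcodec_dec_ctx *ctx)
{
	/* every add/del/walk of ctx_list goes through dev_ctx_lock */
	mutex_lock(&dev->dev_ctx_lock);
	list_add(&ctx->list, &dev->ctx_list);
	mutex_unlock(&dev->dev_ctx_lock);
}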
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
index 849b89dd205c..85b2c0d3d8bc 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -241,6 +241,7 @@ struct mtk_vcodec_dec_ctx {
*
* @dec_mutex: decoder hardware lock
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: lock protecting the context list
* @decode_workqueue: decode work queue
*
* @irqlock: protect data access by irq handler and work thread
@@ -282,6 +283,7 @@ struct mtk_vcodec_dec_dev {
/* decoder hardware mutex lock */
struct mutex dec_mutex[MTK_VDEC_HW_MAX];
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *decode_workqueue;
spinlock_t irqlock;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
index 06ed47df693b..21836dd6ef85 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
@@ -869,7 +869,6 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
inst->vpu.codec_type = ctx->current_codec;
inst->vpu.capture_type = ctx->capture_fourcc;
- ctx->drv_handle = inst;
err = vpu_dec_init(&inst->vpu);
if (err) {
mtk_vdec_err(ctx, "vdec_hevc init err=%d", err);
@@ -898,6 +897,7 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
mtk_vdec_debug(ctx, "lat hevc instance >> %p, codec_type = 0x%x",
inst, inst->vpu.codec_type);
+ ctx->drv_handle = inst;
return 0;
error_free_inst:
kfree(inst);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
index 19407f9bc773..987b3d71b662 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
@@ -449,7 +449,7 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
inst->cur_fb = fb;
- dec->bs_dma = (unsigned long)bs->dma_addr;
+ dec->bs_dma = (uint64_t)bs->dma_addr;
dec->bs_sz = bs->size;
dec->cur_y_fb_dma = y_fb_dma;
dec->cur_c_fb_dma = c_fb_dma;
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
index 55355fa70090..039082f600c8 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
@@ -16,6 +16,7 @@
#include "../vdec_drv_base.h"
#include "../vdec_vpu_if.h"
+#define VP9_MAX_SUPER_FRAMES_NUM 8
#define VP9_SUPER_FRAME_BS_SZ 64
#define MAX_VP9_DPB_SIZE 9
@@ -133,11 +134,11 @@ struct vp9_sf_ref_fb {
*/
struct vdec_vp9_vsi {
unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ];
- struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_FRM_BUF_NUM-1];
+ struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_SUPER_FRAMES_NUM];
int sf_next_ref_fb_idx;
unsigned int sf_frm_cnt;
- unsigned int sf_frm_offset[VP9_MAX_FRM_BUF_NUM-1];
- unsigned int sf_frm_sz[VP9_MAX_FRM_BUF_NUM-1];
+ unsigned int sf_frm_offset[VP9_MAX_SUPER_FRAMES_NUM];
+ unsigned int sf_frm_sz[VP9_MAX_SUPER_FRAMES_NUM];
unsigned int sf_frm_idx;
unsigned int sf_init;
struct vdec_fb fb;
@@ -526,7 +527,7 @@ static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
	/* if this is a super frame and it is not the last sub-frame, get the
	 * next fb for sub-frame decode
*/
- if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt - 1)
+ if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt)
vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
}
@@ -735,7 +736,7 @@ static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
static int validate_vsi_array_indexes(struct vdec_vp9_inst *inst,
struct vdec_vp9_vsi *vsi) {
- if (vsi->sf_frm_idx >= VP9_MAX_FRM_BUF_NUM - 1) {
+ if (vsi->sf_frm_idx > VP9_MAX_SUPER_FRAMES_NUM) {
mtk_vdec_err(inst->ctx, "Invalid vsi->sf_frm_idx=%u.", vsi->sf_frm_idx);
return -EIO;
}
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
index cf48d09b78d7..eea709d93820 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
@@ -1074,7 +1074,7 @@ static int vdec_vp9_slice_setup_tile_buffer(struct vdec_vp9_slice_instance *inst
unsigned int mi_row;
unsigned int mi_col;
unsigned int offset;
- unsigned int pa;
+ dma_addr_t pa;
unsigned int size;
struct vdec_vp9_slice_tiles *tiles;
unsigned char *pos;
@@ -1109,7 +1109,7 @@ static int vdec_vp9_slice_setup_tile_buffer(struct vdec_vp9_slice_instance *inst
pos = va + offset;
end = va + bs->size;
/* truncated */
- pa = (unsigned int)bs->dma_addr + offset;
+ pa = bs->dma_addr + offset;
tb = instance->tile.va;
for (i = 0; i < rows; i++) {
for (j = 0; j < cols; j++) {
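The vp8 and vp9 hunks fix the same class of bug: narrowing a dma_addr_t to a 32-bit type silently drops the upper half of any IOVA above 4 GiB on configurations with CONFIG_ARCH_DMA_ADDR_T_64BIT. Sketch of the failure mode:

static void dma_addr_width_sketch(dma_addr_t iova)
{
	unsigned int bad = (unsigned int)iova;	/* truncates bits 32..63 */
	dma_addr_t good = iova;			/* keeps the native width */

	(void)bad;
	(void)good;
}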
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
index 82e57ae983d5..da6be556727b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
@@ -77,12 +77,14 @@ static bool vpu_dec_check_ap_inst(struct mtk_vcodec_dec_dev *dec_dev, struct vde
struct mtk_vcodec_dec_ctx *ctx;
int ret = false;
+ mutex_lock(&dec_dev->dev_ctx_lock);
list_for_each_entry(ctx, &dec_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&dec_dev->dev_ctx_lock);
return ret;
}
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
index 6319f24bc714..3cb8a1622222 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
@@ -177,7 +177,9 @@ static int fops_vcodec_open(struct file *file)
mtk_v4l2_venc_dbg(2, ctx, "Create instance [%d]@%p m2m_ctx=%p ",
ctx->id, ctx, ctx->m2m_ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mutex_unlock(&dev->dev_mutex);
mtk_v4l2_venc_dbg(0, ctx, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
@@ -212,7 +214,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -294,6 +298,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
mutex_init(&dev->enc_mutex);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
index a042f607ed8d..0bd85d0fb379 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
@@ -178,6 +178,7 @@ struct mtk_vcodec_enc_ctx {
*
* @enc_mutex: encoder hardware lock.
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: lock protecting the context list
* @encode_workqueue: encode work queue
*
* @enc_irq: h264 encoder irq resource
@@ -205,6 +206,7 @@ struct mtk_vcodec_enc_dev {
/* encoder hardware mutex lock */
struct mutex enc_mutex;
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *encode_workqueue;
int enc_irq;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
index 84ad1cc6ad17..51bb7ee141b9 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
@@ -47,12 +47,14 @@ static bool vpu_enc_check_ap_inst(struct mtk_vcodec_enc_dev *enc_dev, struct ven
struct mtk_vcodec_enc_ctx *ctx;
int ret = false;
+ mutex_lock(&enc_dev->dev_ctx_lock);
list_for_each_entry(ctx, &enc_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&enc_dev->dev_ctx_lock);
return ret;
}
diff --git a/drivers/memory/brcmstb_memc.c b/drivers/memory/brcmstb_memc.c
index ea9213f7152e..4f17a93aa028 100644
--- a/drivers/memory/brcmstb_memc.c
+++ b/drivers/memory/brcmstb_memc.c
@@ -243,6 +243,7 @@ static const struct of_device_id brcmstb_memc_of_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, brcmstb_memc_of_match);
static int brcmstb_memc_suspend(struct device *dev)
{
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 572c7fbdcfd3..fbe52ecc0eca 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -450,6 +450,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
{.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195},
{}
};
+MODULE_DEVICE_TABLE(of, mtk_smi_larb_of_ids);
static int mtk_smi_larb_sleep_ctrl_enable(struct mtk_smi_larb *larb)
{
@@ -735,6 +736,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
{.compatible = "mediatek,mt8365-smi-common", .data = &mtk_smi_common_mt8365},
{}
};
+MODULE_DEVICE_TABLE(of, mtk_smi_common_of_ids);
static int mtk_smi_common_probe(struct platform_device *pdev)
{
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 300f8e955a53..0f80c840afc3 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2964,17 +2964,13 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
goto out_free;
manufacture_reply = data_out + sizeof(struct rep_manu_request);
- strscpy(edev->vendor_id, manufacture_reply->vendor_id,
- sizeof(edev->vendor_id));
- strscpy(edev->product_id, manufacture_reply->product_id,
- sizeof(edev->product_id));
- strscpy(edev->product_rev, manufacture_reply->product_rev,
- sizeof(edev->product_rev));
+ memtostr(edev->vendor_id, manufacture_reply->vendor_id);
+ memtostr(edev->product_id, manufacture_reply->product_id);
+ memtostr(edev->product_rev, manufacture_reply->product_rev);
edev->level = manufacture_reply->sas_format;
if (manufacture_reply->sas_format) {
- strscpy(edev->component_vendor_id,
- manufacture_reply->component_vendor_id,
- sizeof(edev->component_vendor_id));
+ memtostr(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id);
tmp = (u8 *)&manufacture_reply->component_id;
edev->component_id = tmp[0] << 8 | tmp[1];
edev->component_revision_id =
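strscpy() expects a NUL-terminated source, but these manufacturer-reply fields are fixed-size byte arrays straight off the wire, which trips FORTIFY. memtostr() (a recently added string.h helper) copies at most the source array's size and always NUL-terminates the destination. Semantics sketch (struct and sizes illustrative):

struct reply_sketch {
	u8 vendor_id[8];		/* not NUL-terminated on the wire */
};

static void copy_vendor_sketch(const struct reply_sketch *r)
{
	char out[sizeof(r->vendor_id) + 1];

	memtostr(out, r->vendor_id);	/* bounded copy + forced NUL */
	(void)out;
}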
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 1a64364700eb..0ad2ff9065aa 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1002,7 +1002,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
} else {
pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST;
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 572333ead5fb..4bd4f32bcdab 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -758,15 +758,6 @@ static int at24_probe(struct i2c_client *client)
}
pm_runtime_enable(dev);
- at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(at24->nvmem)) {
- pm_runtime_disable(dev);
- if (!pm_runtime_status_suspended(dev))
- regulator_disable(at24->vcc_reg);
- return dev_err_probe(dev, PTR_ERR(at24->nvmem),
- "failed to register nvmem\n");
- }
-
/*
* Perform a one-byte test read to verify that the chip is functional,
* unless powering on the device is to be avoided during probe (i.e.
@@ -782,6 +773,15 @@ static int at24_probe(struct i2c_client *client)
}
}
+ at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+ if (IS_ERR(at24->nvmem)) {
+ pm_runtime_disable(dev);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
+ return dev_err_probe(dev, PTR_ERR(at24->nvmem),
+ "failed to register nvmem\n");
+ }
+
/* If this a SPD EEPROM, probe for DDR3 thermal sensor */
if (cdata == &at24_data_spd)
at24_probe_temp_sensor(client);
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 95ef971b5e1c..b28701138b4b 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -19,7 +19,7 @@ KASAN_SANITIZE_rodata.o := n
KCSAN_SANITIZE_rodata.o := n
KCOV_INSTRUMENT_rodata.o := n
OBJECT_FILES_NON_STANDARD_rodata.o := y
-CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
+CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) $(CC_FLAGS_CFI)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index b93404d65650..5b861dbff27e 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -61,7 +61,7 @@ static void *setup_function_descriptor(func_desc_t *fdesc, void *dst)
return fdesc;
}
-static noinline void execute_location(void *dst, bool write)
+static noinline __nocfi void execute_location(void *dst, bool write)
{
void (*func)(void);
func_desc_t fdesc;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index aac36750d2c5..c3a6657dcd4a 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -115,6 +115,8 @@
#define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */
#define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */
+#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b5757993c9b2..7f59dd38c32f 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -116,12 +116,14 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index 6c9f00bcb94b..b543e6b9f3cf 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -400,25 +400,40 @@ static void mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
mei_stop(mei_dev);
+ mei_disable_interrupts(mei_dev);
+
+ vsc_tp_free_irq(hw->tp);
+
return 0;
}
static int mei_vsc_resume(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
int ret;
- ret = mei_restart(mei_dev);
+ ret = vsc_tp_request_irq(hw->tp);
if (ret)
return ret;
+ ret = mei_restart(mei_dev);
+ if (ret)
+ goto err_free;
+
/* start timer if stopped in suspend */
schedule_delayed_work(&mei_dev->timer_work, HZ);
return 0;
+
+err_free:
+ vsc_tp_free_irq(hw->tp);
+
+ return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
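
The reworked PM callbacks free the transport IRQ on suspend and re-request it on resume before restarting the device; if mei_restart() then fails, the IRQ is released again so a failed resume leaves no dangling handler. A self-contained sketch of that unwind order, with stub functions standing in for the real helpers:

#include <stdio.h>

static int request_irq_stub(void)  { return 0; }
static void free_irq_stub(void)    { puts("irq freed"); }
static int restart_stub(int fail)  { return fail ? -5 : 0; }

static int resume(int fail_restart)
{
	int ret = request_irq_stub();	/* acquire the IRQ first */

	if (ret)
		return ret;

	ret = restart_stub(fail_restart);
	if (ret)
		goto err_free;		/* restart failed: undo the request */

	return 0;

err_free:
	free_irq_stub();
	return ret;
}

int main(void)
{
	printf("ok: %d\n", resume(0));
	printf("fail: %d\n", resume(1));
	return 0;
}
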
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index b1e4c23b31a3..49abc95677cd 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -236,8 +236,11 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent,
pdev = to_pci_dev(dev);
- if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) ||
- pdev->vendor != PCI_VENDOR_ID_INTEL)
+ if (pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return 0;
+
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) &&
+ pdev->class != (PCI_CLASS_DISPLAY_OTHER << 8))
return 0;
if (subcomponent != I915_COMPONENT_PXP)
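
The match function now accepts Intel display devices of either VGA or "other display controller" class. pdev->class packs base class, subclass, and programming interface into 24 bits, which is why the 16-bit PCI_CLASS_DISPLAY_* constants are compared shifted left by 8. A standalone sketch of the check (constant values taken from pci_ids.h):

#include <stdint.h>
#include <stdio.h>

#define PCI_CLASS_DISPLAY_VGA   0x0300	/* base class 0x03, subclass 0x00 */
#define PCI_CLASS_DISPLAY_OTHER 0x0380	/* base class 0x03, subclass 0x80 */

/* pdev->class holds (base << 16) | (subclass << 8) | prog-if, so the
 * 16-bit class constants are compared shifted left by 8. */
static int is_supported_display(uint32_t class_code)
{
	return class_code == (PCI_CLASS_DISPLAY_VGA << 8) ||
	       class_code == (PCI_CLASS_DISPLAY_OTHER << 8);
}

int main(void)
{
	printf("%d\n", is_supported_display(0x030000));	/* VGA: 1 */
	printf("%d\n", is_supported_display(0x038000));	/* other display: 1 */
	printf("%d\n", is_supported_display(0x020000));	/* NIC: 0 */
	return 0;
}
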
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index ecfb70cd057c..e6a98dba8a73 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -94,6 +94,27 @@ static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
{}
};
+static irqreturn_t vsc_tp_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ atomic_inc(&tp->assert_cnt);
+
+ wake_up(&tp->xfer_wait);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ if (tp->event_notify)
+ tp->event_notify(tp->event_notify_context);
+
+ return IRQ_HANDLED;
+}
+
/* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
@@ -384,6 +405,37 @@ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
/**
+ * vsc_tp_request_irq - request irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+int vsc_tp_request_irq(struct vsc_tp *tp)
+{
+ struct spi_device *spi = tp->spi;
+ struct device *dev = &spi->dev;
+ int ret;
+
+ irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
+
+/**
+ * vsc_tp_free_irq - free irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_free_irq(struct vsc_tp *tp)
+{
+ free_irq(tp->spi->irq, tp);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
+
+/**
* vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
* @tp: vsc_tp device handle
*/
@@ -413,27 +465,6 @@ void vsc_tp_intr_disable(struct vsc_tp *tp)
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
-static irqreturn_t vsc_tp_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- atomic_inc(&tp->assert_cnt);
-
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
-{
- struct vsc_tp *tp = data;
-
- wake_up(&tp->xfer_wait);
-
- if (tp->event_notify)
- tp->event_notify(tp->event_notify_context);
-
- return IRQ_HANDLED;
-}
-
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
struct acpi_device **__adev = data;
@@ -490,10 +521,9 @@ static int vsc_tp_probe(struct spi_device *spi)
tp->spi = spi;
irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
- ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
- vsc_tp_thread_isr,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), tp);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
if (ret)
return ret;
@@ -522,6 +552,8 @@ static int vsc_tp_probe(struct spi_device *spi)
err_destroy_lock:
mutex_destroy(&tp->mutex);
+ free_irq(spi->irq, tp);
+
return ret;
}
@@ -532,6 +564,8 @@ static void vsc_tp_remove(struct spi_device *spi)
platform_device_unregister(tp->pdev);
mutex_destroy(&tp->mutex);
+
+ free_irq(spi->irq, tp);
}
static const struct acpi_device_id vsc_tp_acpi_ids[] = {
diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h
index f9513ddc3e40..14ca195cbddc 100644
--- a/drivers/misc/mei/vsc-tp.h
+++ b/drivers/misc/mei/vsc-tp.h
@@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
void *context);
+int vsc_tp_request_irq(struct vsc_tp *tp);
+void vsc_tp_free_irq(struct vsc_tp *tp);
+
void vsc_tp_intr_enable(struct vsc_tp *tp);
void vsc_tp_intr_disable(struct vsc_tp *tp);
void vsc_tp_intr_synchronize(struct vsc_tp *tp);
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
index 9ad20e82785b..b21598a18f6d 100644
--- a/drivers/misc/pvpanic/pvpanic-pci.c
+++ b/drivers/misc/pvpanic/pvpanic-pci.c
@@ -44,8 +44,6 @@ static struct pci_driver pvpanic_pci_driver = {
.name = "pvpanic-pci",
.id_table = pvpanic_pci_id_tbl,
.probe = pvpanic_pci_probe,
- .driver = {
- .dev_groups = pvpanic_dev_groups,
- },
+ .dev_groups = pvpanic_dev_groups,
};
module_pci_driver(pvpanic_pci_driver);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b88d6dec209f..9a5f75163aca 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -300,6 +300,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
remain = sgm->length;
if (remain > host->data_len)
remain = host->data_len;
+ sgm->consumed = 0;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 088f8ed4fdc4..a8ee0df47148 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1114,10 +1114,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
host = slot->host;
- if (slot->vsd)
- gpiod_set_value(slot->vsd, power_on);
- if (slot->vio)
- gpiod_set_value(slot->vio, power_on);
+ if (power_on) {
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(1);
+ }
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(1);
+ }
+ } else {
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(50);
+ }
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(50);
+ }
+ }
if (slot->pdata->set_power != NULL)
slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
@@ -1254,18 +1269,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->pdata = &host->pdata->slots[id];
/* Check for some optional GPIO controls */
- slot->vsd = gpiod_get_index_optional(host->dev, "vsd",
- id, GPIOD_OUT_LOW);
+ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vsd))
return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
"error looking up VSD GPIO\n");
- slot->vio = gpiod_get_index_optional(host->dev, "vio",
- id, GPIOD_OUT_LOW);
+ slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vio))
return dev_err_probe(host->dev, PTR_ERR(slot->vio),
"error looking up VIO GPIO\n");
- slot->cover = gpiod_get_index_optional(host->dev, "cover",
- id, GPIOD_IN);
+ slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
+ id, GPIOD_IN);
if (IS_ERR(slot->cover))
return dev_err_probe(host->dev, PTR_ERR(slot->cover),
"error looking up cover switch GPIO\n");
@@ -1379,13 +1394,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
if (IS_ERR(host->virt_base))
return PTR_ERR(host->virt_base);
- host->slot_switch = gpiod_get_optional(host->dev, "switch",
- GPIOD_OUT_LOW);
- if (IS_ERR(host->slot_switch))
- return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
- "error looking up slot switch GPIO\n");
-
-
INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
@@ -1404,6 +1412,12 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->dev = &pdev->dev;
platform_set_drvdata(pdev, host);
+ host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(host->slot_switch))
+ return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
+ "error looking up slot switch GPIO\n");
+
host->id = pdev->id;
host->irq = irq;
host->phys_base = res->start;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 668e0aceeeba..e113b99a3eab 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2694,6 +2694,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
/* Drop the performance vote */
dev_pm_opp_set_rate(dev, 0);
@@ -2708,6 +2713,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
@@ -2726,7 +2732,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
- return sdhci_msm_ice_resume(msm_host);
+ ret = sdhci_msm_ice_resume(msm_host);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 1d8f5a76096a..f2e4a93ed1d6 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -626,6 +626,7 @@ static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
/* perform tuning */
sdhci_start_tuning(host);
+ host->tuning_loop_count = 128;
host->tuning_err = __sdhci_execute_tuning(host, opcode);
if (host->tuning_err) {
/* disable auto-tuning upon tuning error */
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 025b31aa712c..ef89ec382bfe 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -63,7 +63,7 @@ static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
return device_wakeup_enable(&chip->pdev->dev);
else if (!cap_cd_wake)
- return device_wakeup_disable(&chip->pdev->dev);
+ device_wakeup_disable(&chip->pdev->dev);
return 0;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5887feb347a4..0de87bc63840 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -900,7 +900,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.name = compatible;
config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
- config.add_legacy_fixed_of_cells = true;
+ config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
config.ignore_wp = true;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index a8d12c71f987..1b2ec0fec60c 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -857,7 +857,7 @@ static inline void brcmnand_read_data_bus(struct brcmnand_controller *ctrl,
struct brcmnand_soc *soc = ctrl->soc;
int i;
- if (soc->read_data_bus) {
+ if (soc && soc->read_data_bus) {
soc->read_data_bus(soc, flash_cache, buffer, fc_words);
} else {
for (i = 0; i < fc_words; i++)
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 5243fab9face..8db7fc424571 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
- 0xffffffff };
+};
static struct mtd_info *doclist = NULL;
@@ -1554,7 +1554,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
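
The diskonchip hunk drops the 0xffffffff terminator and iterates the table with ARRAY_SIZE() instead, so the loop bound comes from the array itself rather than a sentinel that could be forgotten or mismatched. A reduced, runnable illustration (the addresses are a sample subset):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* The table needs no terminator entry; the compiler-derived element
 * count bounds the loop and cannot be overrun by a missing sentinel. */
static const unsigned long doc_locations_example[] = {
	0xc8000, 0xca000, 0xcc000, 0xce000,
};

int main(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(doc_locations_example); i++)
		printf("probe 0x%lx\n", doc_locations_example[i]);
	return 0;
}
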
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index b079605c84d3..b8cff9240b28 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2815,7 +2815,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
host->cfg0_raw & ~(7 << CW_PER_PAGE));
nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
instrs = 3;
- } else {
+ } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
return 0;
}
@@ -2830,9 +2830,8 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
- 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
- NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == OP_BLOCK_ERASE)
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
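
The qcom_nandc change replaces a ternary expression used only for its side effects with an explicit branch; the status read that used to sit in the else arm is covered by the unconditional read_reg_dma() that follows for all commands. A toy example of why the if/else form reads better as control flow:

#include <stdio.h>

static void write_reg(void) { puts("write cfg"); }
static void read_reg(void)  { puts("read status"); }

int main(void)
{
	int is_erase = 1;

	/* Before: a ternary evaluated purely for side effects,
	 *   is_erase ? write_reg() : read_reg();
	 * After: an explicit branch, which reads as control flow
	 * rather than as a value-producing expression. */
	if (is_erase)
		write_reg();
	else
		read_reg();
	return 0;
}
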
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1035820c2377..8090390edaf9 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -950,20 +950,173 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
mutex_unlock(&priv->reg_mutex);
}
-/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
- * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
- * must only be propagated to C-VLAN and MAC Bridge components. That means
- * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
- * these frames are supposed to be processed by the CPU (software). So we make
- * the switch only forward them to the CPU port. And if received from a CPU
- * port, forward to a single port. The software is responsible of making the
- * switch conform to the latter by setting a single port as destination port on
- * the special tag.
+/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
+ * of the Open Systems Interconnection basic reference model (OSI/RM) are
+ * described; the medium access control (MAC) and logical link control (LLC)
+ * sublayers. The MAC sublayer is the one facing the physical layer.
*
- * This switch intellectual property cannot conform to this part of the standard
- * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
- * DAs, it also includes :22-FF which the scope of propagation is not supposed
- * to be restricted for these MAC DAs.
+ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
+ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
+ * of the Bridge, at least two Ports, and higher layer entities with at least a
+ * Spanning Tree Protocol Entity included.
+ *
+ * Each Bridge Port also functions as an end station and shall provide the MAC
+ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
+ * distinct LLC Entity that supports protocol identification, multiplexing, and
+ * demultiplexing, for protocol data unit (PDU) transmission and reception by
+ * one or more higher layer entities.
+ *
+ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
+ * Entity associated with each Bridge Port is modeled as being directly
+ * connected to the attached Local Area Network (LAN).
+ *
+ * On the switch with CPU port architecture, CPU port functions as Management
+ * Port, and the Management Port functionality is provided by software which
+ * functions as an end station. Software is connected to an IEEE 802 LAN that is
+ * wholly contained within the system that incorporates the Bridge. Software
+ * provides access to the LLC Entity associated with each Bridge Port by the
+ * value of the source port field on the special tag on the frame received by
+ * software.
+ *
+ * We call frames that carry control information to determine the active
+ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
+ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
+ * Protocol Data Units (MVRPDUs), and frames from other link constrained
+ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
+ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
+ * forwarded by a Bridge. Permanently configured entries in the filtering
+ * database (FDB) ensure that such frames are discarded by the Forwarding
+ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
+ *
+ * Each of the reserved MAC addresses specified in Table 8-1
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
+ * permanently configured in the FDB in C-VLAN components and ERs.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-2
+ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
+ * configured in the FDB in S-VLAN components.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-3
+ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
+ * TPMR components.
+ *
+ * The FDB entries for reserved MAC addresses shall specify filtering for all
+ * Bridge Ports and all VIDs. Management shall not provide the capability to
+ * modify or remove entries for reserved MAC addresses.
+ *
+ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
+ * propagation of PDUs within a Bridged Network, as follows:
+ *
+ * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
+ * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
+ * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
+ * PDUs transmitted using this destination address, or any other addresses
+ * that appear in Table 8-1, Table 8-2, and Table 8-3
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
+ * therefore travel no further than those stations that can be reached via a
+ * single individual LAN from the originating station.
+ *
+ * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
+ * address that no conformant S-VLAN component, C-VLAN component, or MAC
+ * Bridge can forward; however, this address is relayed by a TPMR component.
+ * PDUs using this destination address, or any of the other addresses that
+ * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
+ * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
+ * any TPMRs but will propagate no further than the nearest S-VLAN component,
+ * C-VLAN component, or MAC Bridge.
+ *
+ * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
+ * that no conformant C-VLAN component or MAC Bridge can forward; however, it is
+ * relayed by TPMR components and S-VLAN components. PDUs using this
+ * destination address, or any of the other addresses that appear in Table 8-1
+ * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
+ * will be relayed by TPMR components and S-VLAN components but will propagate
+ * no further than the nearest C-VLAN component or MAC Bridge.
+ *
+ * Because the LLC Entity associated with each Bridge Port is provided via CPU
+ * port, we must not filter these frames but forward them to CPU port.
+ *
+ * In a Bridge, the transmission Port is mainly decided by ingress and egress
+ * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
+ * For link-local frames, only CPU port should be designated as destination port
+ * in the FDB, and the other functions of the Forwarding Process must not
+ * interfere with the decision of the transmission Port. We call this process
+ * trapping frames to CPU port.
+ *
+ * Therefore, on the switch with CPU port architecture, link-local frames must
+ * be trapped to CPU port, and certain link-local frames received by a Port of a
+ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
+ * from it.
+ *
+ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
+ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
+ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
+ * doesn't count) of this architecture will either function as a standard MAC
+ * Bridge or a standard VLAN Bridge.
+ *
+ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
+ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
+ * we don't need to relay PDUs using the destination addresses specified in the
+ * Nearest non-TPMR section, nor in the portion of the Nearest Customer Bridge
+ * section where they must be relayed by TPMR components.
+ *
+ * One option to trap link-local frames to CPU port is to add static FDB entries
+ * with CPU port designated as destination port. However, because
+ * Independent VLAN Learning (IVL) is being used on every VID, each entry only
+ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
+ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
+ * entries. This switch intellectual property can only hold a maximum of 2048
+ * entries. Using this option, there also isn't a mechanism to prevent
+ * link-local frames from being discarded when the spanning tree Port State of
+ * the reception Port is discarding.
+ *
+ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
+ * registers. Whilst this applies to every VID, it doesn't contain all of the
+ * reserved MAC addresses without affecting the remaining Standard Group MAC
+ * Addresses. The REV_UN frame tag, utilised via the RGAC4 register, covers the
+ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
+ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
+ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
+ * The latter option provides better but not complete conformance.
+ *
+ * This switch intellectual property also does not provide a mechanism to trap
+ * link-local frames with specific destination addresses to CPU port by Bridge,
+ * to conform to the filtering rules for the distinct Bridge components.
+ *
+ * Therefore, regardless of the type of the Bridge component, link-local frames
+ * with these destination addresses will be trapped to CPU port:
+ *
+ * 01-80-C2-00-00-[00,01,02,03,0E]
+ *
+ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
+ *
+ * In a Bridge comprising an S-VLAN component:
+ *
+ * Link-local frames with these destination addresses will be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-00
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
+ *
+ * To trap link-local frames to CPU port as conformantly as this switch
+ * intellectual property allows, link-local frames are made to be regarded as
+ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
+ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
+ * State function of the Forwarding Process.
+ *
+ * The only remaining interference is the ingress rules. When the reception Port
+ * has no PVID assigned in software, VLAN-untagged frames won't be allowed in.
+ * There doesn't seem to be a mechanism on the switch intellectual property to
+ * have link-local frames bypass this function of the Forwarding Process.
*/
static void
mt753x_trap_frames(struct mt7530_priv *priv)
@@ -971,35 +1124,43 @@ mt753x_trap_frames(struct mt7530_priv *priv)
/* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
* VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
- MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
- MT753X_BPDU_PORT_FW_MASK,
- MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_BPC,
+ MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
+ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
+ MT753X_BPDU_PORT_FW_MASK,
+ MT753X_PAE_BPDU_FR |
+ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
- MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
- MT753X_R01_PORT_FW_MASK,
- MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC1,
+ MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
+ MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
+ MT753X_R02_BPDU_FR |
+ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
- MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
- MT753X_R03_PORT_FW_MASK,
- MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC2,
+ MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
+ MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
+ MT753X_R0E_BPDU_FR |
+ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
}
static void
@@ -1722,14 +1883,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
static int mt753x_mirror_port_get(unsigned int id, u32 val)
{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
- MIRROR_PORT(val);
+ return (id == ID_MT7531 || id == ID_MT7988) ?
+ MT7531_MIRROR_PORT_GET(val) :
+ MIRROR_PORT(val);
}
static int mt753x_mirror_port_set(unsigned int id, u32 val)
{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
- MIRROR_PORT(val);
+ return (id == ID_MT7531 || id == ID_MT7988) ?
+ MT7531_MIRROR_PORT_SET(val) :
+ MIRROR_PORT(val);
}
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
@@ -2319,6 +2482,9 @@ mt7530_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
@@ -2430,6 +2596,9 @@ mt7531_setup_common(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2505,18 +2674,25 @@ mt7531_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
- /* Enable PHY core PLL, since phy_device has not yet been created
- * provided for phy_[read,write]_mmd_indirect is called, we provide
- * our own mt7531_ind_mmd_phy_[read,write] to complete this
- * function.
+ /* Enable Energy-Efficient Ethernet (EEE) and PHY core PLL. Since no
+ * phy_device has been created yet for phy_[read,write]_mmd_indirect to
+ * be called with, we provide our own mt7531_ind_mmd_phy_[read,write] to
+ * complete this function.
*/
val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
- val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
CORE_PLL_GROUP4, val);
+ /* Disable EEE advertisement on the switch PHYs. */
+ for (i = MT753X_CTRL_PHY_ADDR;
+ i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
+ mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ 0);
+ }
+
mt7531_setup_common(ds);
/* Setup VLAN ID 0 for VLAN-unaware bridges */
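
mt753x_trap_frames() above is built on read-modify-write accesses: each mt7530_rmw() call clears a set of GENMASK()-defined fields and then ORs in FIELD_PREP()-style values such as MT753X_PAE_EG_TAG(). A userspace sketch of that pattern; GENMASK32 and the field values here are illustrative stand-ins, not the driver's macros:

#include <stdio.h>

/* Build a contiguous bit mask from bit l to bit h, inclusive. */
#define GENMASK32(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/* Clear the masked fields, then OR in the new field values. */
static unsigned int rmw(unsigned int reg, unsigned int mask, unsigned int val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int bpc  = 0xffffffff;			/* pretend register */
	unsigned int mask = GENMASK32(24, 22) | GENMASK32(18, 16);
	unsigned int val  = (4u << 22) | (6u << 16);	/* sample fields */

	printf("0x%08x\n", rmw(bpc, mask, val));
	return 0;
}
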
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index d17b318e6ee4..a08053390b28 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -32,6 +32,10 @@ enum mt753x_id {
#define SYSC_REG_RSTCTRL 0x34
#define RESET_MCM BIT(2)
+/* Register for ARL global control */
+#define MT753X_AGC 0xc
+#define LOCAL_EN BIT(7)
+
/* Registers to mac forward control for unknown frames */
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
@@ -65,6 +69,7 @@ enum mt753x_id {
/* Registers for BPDU and PAE frame control*/
#define MT753X_BPC 0x24
+#define MT753X_PAE_BPDU_FR BIT(25)
#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
@@ -75,20 +80,24 @@ enum mt753x_id {
/* Register for :01 and :02 MAC DA frame control */
#define MT753X_RGAC1 0x28
+#define MT753X_R02_BPDU_FR BIT(25)
#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
+#define MT753X_R01_BPDU_FR BIT(9)
#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c
+#define MT753X_R0E_BPDU_FR BIT(25)
#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
+#define MT753X_R03_BPDU_FR BIT(9)
#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
@@ -616,6 +625,7 @@ enum mt7531_clk_skew {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_RG_SYSPLL_DMY2 BIT(6)
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c95787cb9086..0918bd6fa81d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -566,13 +566,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
phy_interface_set_rgmii(supported);
}
-static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
- struct phylink_config *config)
+static void
+mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
+ int err;
+ u16 reg;
- /* Translate the default cmode */
- mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev, "p%d: failed to read port status\n", port);
+ return;
+ }
+
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
+ case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ break;
+
+ default:
+ dev_err(chip->dev,
+ "p%d: invalid port mode in status register: %04x\n",
+ port, reg);
+ }
+}
+
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ if (!mv88e6xxx_phy_is_internal(chip, port))
+ mv88e6250_setup_supported_interfaces(chip, port, config);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
@@ -589,12 +637,12 @@ static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
MAC_1000FD;
}
-static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
+static int mv88e63xx_get_port_serdes_cmode(struct mv88e6xxx_chip *chip, int port)
{
u16 reg, val;
int err;
- err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
@@ -603,16 +651,16 @@ static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
return 0xf;
val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
- err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, val);
if (err)
return err;
- err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &val);
if (err)
return err;
/* Restore PHY_DETECT value */
- err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
if (err)
return err;
@@ -640,7 +688,30 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
if (err <= 0)
return;
- cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+ }
+}
+
+static void mv88e632x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+ int cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 0/1 are serdes only ports */
+ if (port == 0 || port == 1) {
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
if (cmode < 0)
dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
port);
@@ -5045,7 +5116,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -5091,7 +5162,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -5657,7 +5728,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
@@ -6116,7 +6187,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 86deeb347cbc..ddadeb9bfdae 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -25,10 +25,25 @@
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
+/* - Modes with PHY suffix use output instead of input clock
+ * - Modes without RMII or RGMII use MII
+ * - Modes without speed do not have a fixed speed specified in the manual
+ * ("DC to x MHz" - variable clock support?)
+ */
+#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
+#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
+#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
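
mv88e6250_setup_supported_interfaces() earlier in this series decodes the port-mode field of the port status register against the constants above. A compact userspace sketch of that decode; the 0x0f00 mask is an assumption inferred from the 0x0X00 spacing of the values, and only a few modes are shown:

#include <stdint.h>
#include <stdio.h>

#define PORTMODE_MASK           0x0f00
#define PORTMODE_MII_100_RGMII  0x0100
#define PORTMODE_MII_HALF       0x0800
#define PORTMODE_MII_FULL       0x0a00

/* Mask out the port-mode bits, then switch on the mode value. */
static const char *port_mode(uint16_t status)
{
	switch (status & PORTMODE_MASK) {
	case PORTMODE_MII_HALF:
	case PORTMODE_MII_FULL:
		return "mii";
	case PORTMODE_MII_100_RGMII:
		return "rgmii";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", port_mode(0x0a00));	/* mii */
	printf("%s\n", port_mode(0x0100));	/* rgmii */
	return 0;
}
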
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 9e9e4a03f1a8..2d8a66ea82fa 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -351,7 +351,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
- size = io_sq->bounce_buf_ctrl.buffer_size *
+ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 09e7da1a69c9..be5acfa41ee0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -718,8 +718,11 @@ void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
bool print_once = true;
+ bool is_xdp_ring;
u32 i;
+ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
+
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
@@ -739,10 +742,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
ena_unmap_tx_buff(tx_ring, tx_info);
- dev_kfree_skb_any(tx_info->skb);
+ if (is_xdp_ring)
+ xdp_return_frame(tx_info->xdpf);
+ else
+ dev_kfree_skb_any(tx_info->skb);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->qid));
+
+ if (!is_xdp_ring)
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
@@ -3481,10 +3489,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- int i, budget, rc;
+ int qid, budget, rc;
int io_queue_count;
io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
+
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -3497,27 +3506,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
- budget = ENA_MONITORED_TX_QUEUES;
+ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
- tx_ring = &adapter->tx_ring[i];
- rx_ring = &adapter->rx_ring[i];
+ qid = adapter->last_monitored_tx_qid;
+
+ while (budget) {
+ qid = (qid + 1) % io_queue_count;
+
+ tx_ring = &adapter->tx_ring[qid];
+ rx_ring = &adapter->rx_ring[qid];
rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
if (unlikely(rc))
return;
- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
budget--;
- if (!budget)
- break;
}
- adapter->last_monitored_tx_qid = i % io_queue_count;
+ adapter->last_monitored_tx_qid = qid;
}
/* trigger napi schedule after 2 consecutive detections */
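
The rewritten loop in check_for_missing_completions() resumes from the queue after last_monitored_tx_qid and wraps modulo the queue count, visiting at most budget queues per invocation. The scanning pattern in isolation:

#include <stdio.h>

int main(void)
{
	int io_queue_count = 8, budget = 3;
	int qid = 6;	/* last queue monitored on the previous run */

	/* Round-robin: advance with wraparound, spend one budget unit
	 * per queue, and remember where the scan stopped. */
	while (budget) {
		qid = (qid + 1) % io_queue_count;
		printf("scan queue %d\n", qid);	/* 7, 0, 1 */
		budget--;
	}
	printf("next run resumes after queue %d\n", qid);
	return 0;
}
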
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
index 337c435d3ce9..5b175e7e92a1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_xdp.c
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -89,7 +89,7 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
if (unlikely(rc))
- return rc;
+ goto err;
ena_tx_ctx.req_id = req_id;
@@ -112,7 +112,9 @@ int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
error_unmap_dma:
ena_unmap_tx_buff(tx_ring, tx_info);
+err:
tx_info->xdpf = NULL;
+
return rc;
}
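
The ena_xdp_xmit_frame() fix routes the early mapping failure through a shared label so tx_info->xdpf is cleared on every error path. A self-contained sketch of that single-exit unwind style, with stubs standing in for the mapping and transmit steps:

#include <stdio.h>
#include <stdlib.h>

static int map_frame(int fail)    { return fail ? -12 : 0; }
static void transmit(void *frame) { free(frame); }	/* consumes the frame */

static int xmit_frame(int fail_map)
{
	void *frame = malloc(16);	/* stand-in for the frame reference */
	int rc;

	if (!frame)
		return -1;

	rc = map_frame(fail_map);
	if (rc)
		goto err;	/* every failure funnels through one label */

	transmit(frame);	/* success: transmit consumes the frame */
	return 0;

err:
	free(frame);		/* failure: release the reference, once */
	return rc;
}

int main(void)
{
	printf("%d %d\n", xmit_frame(0), xmit_frame(1));
	return 0;
}
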
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 9662ee72814c..536635e57727 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -593,6 +593,16 @@ err_out:
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
+void pdsc_pci_reset_thread(struct work_struct *work)
+{
+ struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
+ struct pci_dev *pdev = pdsc->pdev;
+
+ pci_dev_get(pdev);
+ pci_reset_function(pdev);
+ pci_dev_put(pdev);
+}
+
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
u8 fw_status;
@@ -607,7 +617,8 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
if (fw_status != PDS_RC_BAD_PCI)
return;
- pci_reset_function(pdsc->pdev);
+ /* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
+ queue_work(pdsc->wq, &pdsc->pci_reset_work);
}
void pdsc_health_thread(struct work_struct *work)
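
pdsc_pci_reset_thread() recovers its pdsc from the embedded work_struct with container_of(), and pdsc_check_pci_health() merely queues the work so the reset runs in workqueue context rather than in the health thread itself. The back-pointer arithmetic in userspace form; the struct and field names mirror the driver but the types are simplified:

#include <stdio.h>
#include <stddef.h>

struct work_struct { int pending; };

struct pdsc_example {
	int uid;
	struct work_struct pci_reset_work;	/* embedded work item */
};

/* Recover the containing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void reset_thread(struct work_struct *work)
{
	struct pdsc_example *pdsc =
		container_of(work, struct pdsc_example, pci_reset_work);

	printf("resetting device %d\n", pdsc->uid);
}

int main(void)
{
	struct pdsc_example pdsc = { .uid = 7 };

	reset_thread(&pdsc.pci_reset_work);	/* as a workqueue would */
	return 0;
}
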
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 92d7657dd614..a3e17a0c187a 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -197,6 +197,7 @@ struct pdsc {
struct pdsc_qcq notifyqcq;
u64 last_eid;
struct pdsc_viftype *viftype_status;
+ struct work_struct pci_reset_work;
};
/** enum pds_core_dbell_bits - bitwise composition of dbell values.
@@ -313,5 +314,6 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
void pdsc_fw_down(struct pdsc *pdsc);
void pdsc_fw_up(struct pdsc *pdsc);
+void pdsc_pci_reset_thread(struct work_struct *work);
#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index e494e1298dc9..495ef4ef8c10 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -229,6 +229,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc)
.reset.opcode = PDS_CORE_CMD_RESET,
};
+ if (!pdsc_is_fw_running(pdsc))
+ return 0;
+
return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
}
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index ab6133e7db42..660268ff9562 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -239,6 +239,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
pdsc->wq = create_singlethread_workqueue(wq_name);
INIT_WORK(&pdsc->health_work, pdsc_health_thread);
+ INIT_WORK(&pdsc->pci_reset_work, pdsc_pci_reset_thread);
timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 72ea97c5d5d4..82768b0e9026 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -436,10 +436,8 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}
-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
- struct bcmasp_intf *intf =
- container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0;
@@ -482,10 +480,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT);
}
- /* Ensure all descriptors have been written to DRAM for the hardware
- * to see updated contents.
- */
- wmb();
+ return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ int released = 0;
+
+ released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi);
@@ -797,6 +801,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
+ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
@@ -885,6 +890,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+ bcmasp_tx_reclaim(intf);
+
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3e4fb3c3e834..1be6d14030bc 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 493b724848c8..2c2ee79c4d77 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1778,7 +1778,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
} else {
@@ -1788,7 +1788,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
@@ -1804,7 +1804,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1815,7 +1815,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
}
@@ -2094,11 +2094,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
- if (!frag_len) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!frag_len)
+ goto oom_next_rx;
}
xdp_active = true;
}
@@ -2121,9 +2118,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -2134,29 +2129,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
} else {
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
}
}
@@ -2234,6 +2221,11 @@ next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2280,7 +2272,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
return rc;
}
@@ -9089,7 +9081,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5_PLUS(bp))
return;
status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -11758,6 +11750,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ if (bp->ptp_cfg)
+ atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
bnxt_cfg_usr_fltrs(bp);
@@ -13035,6 +13029,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
+}
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
@@ -13048,12 +13052,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -15371,6 +15370,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
@@ -15379,16 +15379,27 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- if (state == pci_channel_io_perm_failure) {
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
+
+ if (abort || state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+ /* Link is not reliable anymore if state is pci_channel_io_frozen
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
@@ -15472,6 +15483,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 93f9bd55020f..195c02dc0683 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -210,6 +210,9 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
+ if (edev->ulp_tbl->msix_requested)
+ bnxt_fill_msix_vecs(bp, edev->msix_entries);
+
if (aux_priv) {
struct auxiliary_device *adev;
@@ -392,12 +395,13 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
if (!edev)
goto aux_dev_uninit;
+ aux_priv->edev = edev;
+
ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
if (!ulp)
goto aux_dev_uninit;
edev->ulp_tbl = ulp;
- aux_priv->edev = edev;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b1f84b37032a..c7e7dac057a3 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -2467,14 +2467,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
u32 reg;
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
+ if (reg & CMD_SW_RESET) {
+ spin_unlock_bh(&priv->reg_lock);
return;
+ }
if (enable)
reg |= mask;
else
reg &= ~mask;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
/* UniMAC stops on a packet boundary, wait for a full-size packet
* to be processed
@@ -2490,8 +2494,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
udelay(10);
/* issue soft reset and disable MAC while updating its registers */
+ spin_lock_bh(&priv->reg_lock);
bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
udelay(2);
+ spin_unlock_bh(&priv->reg_lock);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -3334,7 +3340,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ netif_addr_lock_bh(dev);
bcmgenet_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3595,16 +3603,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
* 3. The number of filters needed exceeds the number of filters
* supported by the hardware.
*/
+ spin_lock(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
(nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
return;
} else {
reg &= ~CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
}
/* update MDF filter */
@@ -4003,6 +4014,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/* Set default pause parameters */
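
The new priv->reg_lock serializes the read-modify-write of UMAC_CMD performed by umac_enable_set(), bcmgenet_set_rx_mode() and the WoL/MII paths; without it, two writers can read the same stale value and silently lose each other's bit updates. A compilable sketch of the pattern with the register simulated (bit positions and names here are illustrative):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t umac_cmd;            /* stands in for the UMAC_CMD register */
    #define CMD_SW_RESET (1u << 13)      /* illustrative bit position */

    static void umac_enable_set(uint32_t mask, int enable)
    {
        pthread_mutex_lock(&reg_lock);
        uint32_t reg = umac_cmd;         /* read */
        if (reg & CMD_SW_RESET) {        /* bail out while the MAC is in reset */
            pthread_mutex_unlock(&reg_lock);
            return;
        }
        if (enable)                      /* modify */
            reg |= mask;
        else
            reg &= ~mask;
        umac_cmd = reg;                  /* write back under the same lock */
        pthread_mutex_unlock(&reg_lock);
    }

    int main(void) { umac_enable_set(0x3, 1); printf("%#x\n", umac_cmd); return 0; }

In the driver the lock is taken with spin_lock_bh() in most paths because some writers run in softirq context.
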
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7523b60b3c1c..43b923c48b14 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule {
/* device context */
struct bcmgenet_priv {
void __iomem *base;
+ /* reg_lock: lock to serialize access to shared registers */
+ spinlock_t reg_lock;
enum bcmgenet_version version;
struct net_device *dev;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 7a41cad5788f..1248792d7fd4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -151,6 +151,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Can't suspend with WoL if MAC is still in reset */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET)
reg &= ~CMD_SW_RESET;
@@ -158,6 +159,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* disable RX */
reg &= ~CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
mdelay(10);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
@@ -203,6 +205,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Enable CRC forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
@@ -210,6 +213,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
@@ -256,7 +260,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
/* Disable CRC Forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ada89355747..c4a3698cef66 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#include <linux/acpi.h>
@@ -76,6 +76,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
CMD_HD_EN |
@@ -88,6 +89,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
@@ -275,6 +277,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* block for the interface to work, unconditionally clear the
* Out-of-band disable since we do not need it.
*/
+ mutex_lock(&phydev->lock);
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
@@ -286,6 +289,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
reg |= RGMII_MODE_EN;
}
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ mutex_unlock(&phydev->lock);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7246e13dd559..97291bfbeea5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
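
memdup_user_nul() differs from memdup_user() by one byte: it allocates n + 1 and NUL-terminates the copy, so string parsers such as strchr() cannot run past the end of the user-supplied data. A userspace analogue of the helper (the name memdup_nul is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Copy n bytes and always append a terminating NUL, so the result is
     * safe to hand to string functions regardless of what the caller sent. */
    static char *memdup_nul(const char *src, size_t n)
    {
        char *p = malloc(n + 1);
        if (!p)
            return NULL;
        memcpy(p, src, n);
        p[n] = '\0';
        return p;
    }

    int main(void)
    {
        char raw[4] = { 'a', 'b', 'c', 'd' };   /* not NUL-terminated */
        char *buf = memdup_nul(raw, sizeof(raw));
        if (buf) {
            printf("%s\n", strchr(buf, 'c'));   /* safe: buf ends in '\0' */
            free(buf);
        }
        return 0;
    }
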
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 49d5808b7d11..de52bcb884c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2670,12 +2670,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
- __netif_tx_lock(q->txq, smp_processor_id());
+ __netif_tx_lock_bh(q->txq);
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
return -ENOMEM;
}
@@ -2710,7 +2710,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index f19f1e1d1f9f..133c94646c21 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -897,7 +897,7 @@ struct hnae3_handle {
struct hnae3_roce_private_info rinfo;
};
- u32 numa_node_mask; /* for multi-chip support */
+ nodemask_t numa_node_mask; /* for multi-chip support */
enum hnae3_port_base_vlan_state port_base_vlan_state;
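
numa_node_mask widens here from a u32 to nodemask_t, a fixed-size bitmap indexed by node id, which is why plain assignment becomes bitmap_copy(..., MAX_NUMNODES) in the hclge/hclgevf hunks below. A sketch of what that copy means, with illustrative sizes:

    #include <stdio.h>
    #include <string.h>

    #define MAX_NODES 1024                      /* illustrative; kernel uses MAX_NUMNODES */
    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct nodemask { unsigned long bits[LONGS(MAX_NODES)]; };

    /* Copy nbits worth of bitmap words from src to dst. */
    static void bitmap_copy(unsigned long *dst, const unsigned long *src,
                            unsigned int nbits)
    {
        memcpy(dst, src, LONGS(nbits) * sizeof(unsigned long));
    }

    int main(void)
    {
        struct nodemask src = { .bits = { 0x5 } }, dst = { 0 };
        bitmap_copy(dst.bits, src.bits, MAX_NODES);
        printf("%lx\n", dst.bits[0]);           /* 5: nodes 0 and 2 set */
        return 0;
    }
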
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ff6a2ed23ddb..ce60332d83c3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1537,6 +1537,9 @@ static int hclge_configure(struct hclge_dev *hdev)
cfg.default_speed, ret);
return ret;
}
+ hdev->hw.mac.req_speed = hdev->hw.mac.speed;
+ hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
+ hdev->hw.mac.req_duplex = DUPLEX_FULL;
hclge_parse_link_mode(hdev, cfg.speed_ability);
@@ -1766,7 +1769,8 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
- nic->numa_node_mask = hdev->numa_node_mask;
+ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+ MAX_NUMNODES);
nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
@@ -2458,7 +2462,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
- roce->numa_node_mask = nic->numa_node_mask;
+ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+ MAX_NUMNODES);
return 0;
}
@@ -3342,9 +3347,9 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
return ret;
}
- hdev->hw.mac.autoneg = cmd->base.autoneg;
- hdev->hw.mac.speed = cmd->base.speed;
- hdev->hw.mac.duplex = cmd->base.duplex;
+ hdev->hw.mac.req_autoneg = cmd->base.autoneg;
+ hdev->hw.mac.req_speed = cmd->base.speed;
+ hdev->hw.mac.req_duplex = cmd->base.duplex;
linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
return 0;
@@ -3377,9 +3382,9 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
if (!hnae3_dev_phy_imp_supported(hdev))
return 0;
- cmd.base.autoneg = hdev->hw.mac.autoneg;
- cmd.base.speed = hdev->hw.mac.speed;
- cmd.base.duplex = hdev->hw.mac.duplex;
+ cmd.base.autoneg = hdev->hw.mac.req_autoneg;
+ cmd.base.speed = hdev->hw.mac.req_speed;
+ cmd.base.duplex = hdev->hw.mac.req_duplex;
linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
@@ -7952,8 +7957,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
/* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- /* flush memory to make sure DOWN is seen by service task */
- smp_mb__before_atomic();
+ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
hclge_flush_link_update(hdev);
}
}
@@ -9906,6 +9910,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
struct hclge_vport *vport;
+ bool enable = true;
int ret;
int i;
@@ -9925,8 +9930,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev)
vport->cur_vlan_fltr_en = true;
}
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
+ !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
+ enable = false;
+
return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true, 0);
+ HCLGE_FILTER_FE_INGRESS, enable, 0);
}
static int hclge_init_vlan_type(struct hclge_dev *hdev)
@@ -11622,16 +11631,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto out;
- ret = hclge_devlink_init(hdev);
- if (ret)
- goto err_pci_uninit;
-
- devl_lock(hdev->devlink);
-
/* Firmware command queue initialize */
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
- goto err_devlink_uninit;
+ goto err_pci_uninit;
/* Firmware command initialize */
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
@@ -11759,7 +11762,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_update_port_info(hdev);
if (ret)
- goto err_mdiobus_unreg;
+ goto err_ptp_uninit;
INIT_KFIFO(hdev->mac_tnl_log);
@@ -11799,6 +11802,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_warn(&pdev->dev,
"failed to wake on lan init, ret = %d\n", ret);
+ ret = hclge_devlink_init(hdev);
+ if (ret)
+ goto err_ptp_uninit;
+
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
@@ -11806,10 +11813,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
HCLGE_DRIVER_NAME);
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
-
- devl_unlock(hdev->devlink);
return 0;
+err_ptp_uninit:
+ hclge_ptp_uninit(hdev);
err_mdiobus_unreg:
if (hdev->hw.mac.phydev)
mdiobus_unregister(hdev->hw.mac.mdio_bus);
@@ -11819,9 +11826,6 @@ err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
-err_devlink_uninit:
- devl_unlock(hdev->devlink);
- hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index e821dd2f1528..3a9186457ad8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -279,11 +279,14 @@ struct hclge_mac {
u8 media_type; /* port media type, e.g. fibre/copper/backplane */
u8 mac_addr[ETH_ALEN];
u8 autoneg;
+ u8 req_autoneg;
u8 duplex;
+ u8 req_duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
u8 lane_num;
u32 speed;
+ u32 req_speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
@@ -891,7 +894,7 @@ struct hclge_dev {
u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
- u32 numa_node_mask;
+ nodemask_t numa_node_mask;
u16 rx_buf_len;
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index d4a0e0be7a72..59c863306657 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -1077,12 +1077,13 @@ static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
hdev = param->vport->back;
cmd_func = hclge_mbx_ops_list[param->req->msg.code];
- if (cmd_func)
- ret = cmd_func(param);
- else
+ if (!cmd_func) {
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n",
param->req->msg.code);
+ return;
+ }
+ ret = cmd_func(param);
/* PF driver should not reply IMP */
if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 0aa9beefd1c7..43ee20eb03d1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -412,7 +412,8 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->ae_algo = &ae_algovf;
nic->pdev = hdev->pdev;
- nic->numa_node_mask = hdev->numa_node_mask;
+ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+ MAX_NUMNODES);
nic->flags |= HNAE3_SUPPORT_VF;
nic->kinfo.io_base = hdev->hw.hw.io_base;
@@ -2082,8 +2083,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
- roce->numa_node_mask = nic->numa_node_mask;
-
+ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+ MAX_NUMNODES);
return 0;
}
@@ -2180,8 +2181,7 @@ static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
} else {
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- /* flush memory to make sure DOWN is seen by service task */
- smp_mb__before_atomic();
+ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
hclgevf_flush_link_update(hdev);
}
}
@@ -2845,10 +2845,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_devlink_init(hdev);
- if (ret)
- goto err_devlink_init;
-
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_cmd_queue_init;
@@ -2941,6 +2937,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_init_rxd_adv_layout(hdev);
+ ret = hclgevf_devlink_init(hdev);
+ if (ret)
+ goto err_config;
+
set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
hdev->last_reset_time = jiffies;
@@ -2960,8 +2960,6 @@ err_misc_irq_init:
err_cmd_init:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_cmd_queue_init:
- hclgevf_devlink_uninit(hdev);
-err_devlink_init:
hclgevf_pci_uninit(hdev);
clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
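
set_bit() is not a memory barrier on all architectures, and the barrier has to order the bit store against the service task's subsequent reads, so it belongs after the atomic op: smp_mb__after_atomic(), not __before. A userspace model of the required ordering using C11 fences (the flag name is illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool down_flag;

    static void set_down(void)
    {
        /* Relaxed store models the non-ordering set_bit(). */
        atomic_store_explicit(&down_flag, true, memory_order_relaxed);
        /* Full fence *after* the store, like smp_mb__after_atomic(): the
         * service task must observe DOWN before we go on to flush its work. */
        atomic_thread_fence(memory_order_seq_cst);
    }

    static bool service_task_sees_down(void)
    {
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&down_flag, memory_order_relaxed);
    }

    int main(void) { set_down(); return service_task_sees_down() ? 0 : 1; }
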
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index a73f2bf3a56a..cccef3228461 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -236,7 +236,7 @@ struct hclgevf_dev {
u16 rss_size_max; /* HW defined max RSS task queue */
u16 num_alloc_vport; /* num vports this driver supports */
- u32 numa_node_mask;
+ nodemask_t numa_node_mask;
u16 rx_buf_len;
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 93544f1cc2a5..f7ae0e0aa4a4 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -157,7 +157,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -181,7 +181,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success) {
*data = (u16)mdic;
@@ -237,7 +237,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -261,7 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success)
return 0;
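
usleep_range() schedules away and can overshoot a 50µs request many times over on a loaded system, and it must not be used where sleeping is forbidden; udelay() busy-waits for a precise short interval, which suits these sub-100µs MDIC polls. The shape of such a bounded poll, sketched in userspace (the timing helpers stand in for udelay()-style spinning):

    #include <stdbool.h>
    #include <time.h>

    static long long now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* Spin until ready() or the deadline passes; models polling a status
     * register in a context where the caller cannot sleep. */
    static bool poll_ready(bool (*ready)(void), long long timeout_ns)
    {
        long long deadline = now_ns() + timeout_ns;
        while (now_ns() < deadline) {
            if (ready())
                return true;
        }
        return false;
    }

    static bool always_ready(void) { return true; }

    int main(void) { return poll_ready(always_ready, 50 * 1000) ? 0 : 1; }
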
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 48b9ddb2b1b3..ffb9f9f15c52 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -16107,8 +16107,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
rd32(&pf->hw, I40E_PRTGL_SAH));
if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- pf->hw.port, val);
+ dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
+ pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16650,7 +16650,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index ef2440f3abf8..166832a4213a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3503,6 +3503,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ *
+ * Return: True if the configuration is the same, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+ struct tc_mqprio_qopt *mqprio_qopt)
+{
+ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+ int i;
+
+ if (adapter->num_tc != mqprio_qopt->num_tc)
+ return false;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ if (ch[i].count != mqprio_qopt->count[i] ||
+ ch[i].offset != mqprio_qopt->offset[i])
+ return false;
+ }
+ return true;
+}
+
+/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type_data: tc offload data
@@ -3559,7 +3587,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
- if (adapter->num_tc == num_tc)
+ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index d252d98218d0..9fc0fd95a13d 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -171,7 +171,7 @@ ice_debugfs_module_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 8)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -257,7 +257,7 @@ ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 4)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -332,7 +332,7 @@ ice_debugfs_enable_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 2)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -428,7 +428,7 @@ ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 5)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index b890410a2bc0..688ccb0615ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
* - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
* - Tunnel flag (present if tunnel)
*/
+ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+ lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* Always add direction metadata */
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ ice_rule_add_src_vsi_metadata(&list[i]);
+ i++;
+ }
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
@@ -772,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
int ret;
int i;
- if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}
@@ -820,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
+ rule_info.src_vsi = vsi->idx;
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
@@ -1481,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 21d26e19338a..d10a4be965b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -856,6 +856,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
if (lag && lag->bonded && lag->primary) {
@@ -867,11 +872,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
act_prt = ICE_LAG_INVALID_PORT;
}
- if (flags & ICE_VF_RESET_LOCK)
- mutex_lock(&vf->cfg_lock);
- else
- lockdep_assert_held(&vf->cfg_lock);
-
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
@@ -956,14 +956,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_mbx_clear_malvf(&vf->mbx_info);
out_unlock:
- if (flags & ICE_VF_RESET_LOCK)
- mutex_unlock(&vf->cfg_lock);
-
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
mutex_unlock(&pf->lag_mutex);
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
return err;
}
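
The ice fix above moves the vf->cfg_lock acquisition ahead of pf->lag_mutex and releases the two in reverse order, so every path now takes the locks in one global order; a single consistent ordering is the standard rule for avoiding AB-BA deadlocks. A minimal sketch:

    #include <pthread.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lag_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* One global acquisition order (cfg_lock, then lag_mutex), released in
     * reverse: two threads following the same order can never deadlock. */
    static void reset_vf(void)
    {
        pthread_mutex_lock(&cfg_lock);
        pthread_mutex_lock(&lag_mutex);
        /* ... reset work ... */
        pthread_mutex_unlock(&lag_mutex);
        pthread_mutex_unlock(&cfg_lock);
    }

    int main(void) { reset_vf(); return 0; }
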
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 90316dc58630..6bc56c7c181e 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -298,6 +298,7 @@ struct igc_adapter {
/* LEDs */
struct mutex led_mutex;
+ struct igc_led_classdev *leds;
};
void igc_up(struct igc_adapter *adapter);
@@ -723,6 +724,7 @@ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
int igc_led_setup(struct igc_adapter *adapter);
+void igc_led_free(struct igc_adapter *adapter);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
index bf240c5daf86..3929b25b6ae6 100644
--- a/drivers/net/ethernet/intel/igc/igc_leds.c
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -236,8 +236,8 @@ static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
pci_dev_id(adapter->pdev), index);
}
-static void igc_setup_ldev(struct igc_led_classdev *ldev,
- struct net_device *netdev, int index)
+static int igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct led_classdev *led_cdev = &ldev->led;
@@ -257,24 +257,46 @@ static void igc_setup_ldev(struct igc_led_classdev *ldev,
led_cdev->hw_control_get = igc_led_hw_control_get;
led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
- devm_led_classdev_register(&netdev->dev, led_cdev);
+ return led_classdev_register(&netdev->dev, led_cdev);
}
int igc_led_setup(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct device *dev = &netdev->dev;
struct igc_led_classdev *leds;
- int i;
+ int i, err;
mutex_init(&adapter->led_mutex);
- leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
- for (i = 0; i < IGC_NUM_LEDS; i++)
- igc_setup_ldev(leds + i, netdev, i);
+ for (i = 0; i < IGC_NUM_LEDS; i++) {
+ err = igc_setup_ldev(leds + i, netdev, i);
+ if (err)
+ goto err;
+ }
+
+ adapter->leds = leds;
return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
+ return err;
+}
+
+void igc_led_free(struct igc_adapter *adapter)
+{
+ struct igc_led_classdev *leds = adapter->leds;
+ int i;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ led_classdev_unregister(&((leds + i)->led));
+
+ kfree(leds);
}
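
Switching from devm_led_classdev_register() to plain led_classdev_register() makes the LED lifetime explicit: devm-managed resources are only released when the underlying device goes away, which here is too late relative to freeing the netdev, so igc_remove() now calls igc_led_free() directly. The unwind-on-partial-failure shape used by igc_led_setup(), as a standalone sketch (registration stubs are illustrative):

    #include <stdio.h>

    #define NUM_LEDS 3

    static int register_one(int i) { (void)i; return 0; }   /* stub */
    static void unregister_one(int i) { (void)i; }          /* stub */

    /* Register N objects; on the first failure, unregister only the ones
     * already registered (i-1 down to 0) and report the error. */
    static int setup_all(void)
    {
        int i, err;

        for (i = 0; i < NUM_LEDS; i++) {
            err = register_one(i);
            if (err)
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0)
            unregister_one(i);
        return err;
    }

    int main(void) { return setup_all(); }
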
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 35ad40a803cb..4d975d620a8e 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7021,6 +7021,9 @@ static void igc_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
+ if (IS_ENABLED(CONFIG_IGC_LEDS))
+ igc_led_free(adapter);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 2500f5ba4f5a..881d704644fb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
u16 pcifunc;
int ret, lf;
- cmd_buf = memdup_user(buffer, count + 1);
+ cmd_buf = memdup_user_nul(buffer, count);
if (IS_ERR(cmd_buf))
return -ENOMEM;
- cmd_buf[count] = '\0';
-
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d39001cdc707..00af8888e329 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4819,18 +4819,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+ }
- /* Set chan/link to backpressure TL3 instead of TL2 */
- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
- * This sticky mode is known to cause SQ stalls when multiple
- * SQs are mapped to same SMQ and transmitting pkts at a time.
- */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
- cfg &= ~BIT_ULL(15);
- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
- }
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to same SMQ and transmitting pkts at a time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index be709f83f331..e8b73b9d75e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2181,7 +2181,6 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
npc_mcam_rsrcs_deinit(rvu);
- kfree(mcam->counters.bmap);
if (rvu->kpu_prfl_addr)
iounmap(rvu->kpu_prfl_addr);
else
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 87bdb93cb066..f4655a8c0705 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -689,6 +689,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
+ u32 val;
flow_rule_match_control(rule, &match);
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -697,12 +698,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
if (ntohs(flow_spec->etype) == ETH_P_IP) {
- flow_spec->ip_flag = IPV4_FLAG_MORE;
+ flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
- flow_spec->next_header = IPPROTO_FRAGMENT;
+ flow_spec->next_header = val ?
+ IPPROTO_FRAGMENT : 0;
flow_mask->next_header = 0xff;
req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
} else {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1e77bbf5d22a..1723e9912ae0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -382,6 +382,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
otx2_qos_read_txschq_cfg_tl(node, cfg);
cnt = cfg->static_node_pos[node->level];
cfg->schq_contig_list[node->level][cnt] = node->schq;
+ cfg->schq_index_used[node->level][cnt] = true;
cfg->schq_contig[node->level]++;
cfg->static_node_pos[node->level]++;
otx2_qos_read_txschq_cfg_schq(node, cfg);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index c895e265ae0e..61334a71058c 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1074,13 +1074,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
+ mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);
wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
- wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
if (!mtk_wed_get_rx_capa(dev))
return;
@@ -1093,7 +1093,6 @@ static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
mtk_wed_stop(dev);
- mtk_wed_dma_disable(dev);
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
@@ -2605,9 +2604,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
- if (!dev->running)
- return;
-
mtk_wed_set_ext_int(dev, !!mask);
wed_w32(dev, MTK_WED_INT_MASK, mask);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 86f1854698b4..883c044852f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *
}
static inline u8
+mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo)
+{
+ return fifo->data[fifo->mask & fifo->cc];
+}
+
+static inline void
mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
{
- return fifo->data[fifo->mask & fifo->cc++];
+ fifo->cc++;
}
static inline void
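
Splitting the destructive pop into peek-then-pop lets the transmit path read a metadata slot while building the descriptor but consume it only once the send actually goes out; an skb dropped before posting no longer leaks its slot. A compilable sketch of such a power-of-two ring with a non-consuming peek, mirroring the mask & cc indexing above:

    #include <stdint.h>
    #include <stdio.h>

    #define FIFO_SZ 8                      /* must be a power of two */

    struct fifo {
        uint8_t data[FIFO_SZ];
        uint16_t pc, cc;                   /* producer / consumer counters */
        uint16_t mask;                     /* FIFO_SZ - 1 */
    };

    static void push(struct fifo *f, uint8_t v) { f->data[f->mask & f->pc++] = v; }
    static uint8_t peek(const struct fifo *f)   { return f->data[f->mask & f->cc]; }
    static void pop(struct fifo *f)             { f->cc++; }

    int main(void)
    {
        struct fifo f = { .mask = FIFO_SZ - 1 };
        push(&f, 42);
        uint8_t v = peek(&f);   /* read the slot without consuming it */
        /* ... only once the send really completes ... */
        pop(&f);                /* consume; on a drop we would simply not pop */
        printf("%u\n", v);
        return 0;
    }
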
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index e87e26f2c669..6743806b8480 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -83,24 +83,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node_qid > priv->htb_max_qos_sqs);
- if (node_qid == priv->htb_max_qos_sqs) {
- struct mlx5e_sq_stats *stats, **stats_list = NULL;
-
- if (priv->htb_max_qos_sqs == 0) {
- stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
- sizeof(*stats_list),
- GFP_KERNEL);
- if (!stats_list)
- return -ENOMEM;
- }
+ WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
+ if (!priv->htb_qos_sq_stats) {
+ struct mlx5e_sq_stats **stats_list;
+
+ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+ sizeof(*stats_list), GFP_KERNEL);
+ if (!stats_list)
+ return -ENOMEM;
+
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ }
+
+ if (!priv->htb_qos_sq_stats[node_qid]) {
+ struct mlx5e_sq_stats *stats;
+
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
- if (!stats) {
- kvfree(stats_list);
+ if (!stats)
return -ENOMEM;
- }
- if (stats_list)
- WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+
WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
/* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 0ab9db319530..22918b2ef7f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -108,7 +108,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ rtnl_lock();
mlx5e_activate_txqsq(sq);
+ rtnl_unlock();
+
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
else
@@ -179,12 +182,16 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
+ rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
+ rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
+ rtnl_lock();
mlx5e_activate_priv_channels(priv);
+ rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index bcafb4bf9415..8d9a3b5ec973 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -179,6 +179,13 @@ u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
return min_t(u32, rqt_size, max_cap_rqt_size);
}
+#define MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH 256
+
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void)
+{
+ return MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH / MLX5E_UNIFORM_SPREAD_RQT_FACTOR;
+}
+
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
{
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index e0bc30308c77..2f9e04a8418f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -38,6 +38,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
}
u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void);
int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id);
int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
unsigned int num_rqns,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
index f675b1926340..f66bbc846464 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
{
+ mutex_lock(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
kvfree(selq->standby);
@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
kvfree(selq->standby);
selq->standby = NULL;
+ mutex_unlock(selq->state_lock);
}
void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index b2cabd6ab86c..cc9bcc420032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1640,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = {
.mdo_add_secy = mlx5e_macsec_add_secy,
.mdo_upd_secy = mlx5e_macsec_upd_secy,
.mdo_del_secy = mlx5e_macsec_del_secy,
+ .rx_uses_md_dst = true,
};
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index c7f542d0b8f0..93cf23278d93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -46,6 +46,10 @@ struct arfs_table {
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
+enum {
+ MLX5E_ARFS_STATE_ENABLED,
+};
+
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
@@ -60,6 +64,7 @@ struct mlx5e_arfs_tables {
spinlock_t arfs_lock;
int last_filter_id;
struct workqueue_struct *wq;
+ unsigned long state;
};
struct arfs_tuple {
@@ -170,6 +175,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
return err;
}
}
+ set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
return 0;
}
@@ -455,6 +462,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
int i;
int j;
+ clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
spin_lock_bh(&arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
@@ -627,17 +636,8 @@ static void arfs_handle_work(struct work_struct *work)
struct mlx5_flow_handle *rule;
arfs = mlx5e_fs_get_arfs(priv->fs);
- mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&arfs->arfs_lock);
- hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&arfs->arfs_lock);
-
- mutex_unlock(&priv->state_lock);
- kfree(arfs_rule);
- goto out;
- }
- mutex_unlock(&priv->state_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
+ return;
if (!arfs_rule->rule) {
rule = arfs_add_rule(priv, arfs_rule);
@@ -753,6 +753,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -EPERM;
+ }
+
arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
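
The new MLX5E_ARFS_STATE_ENABLED bit is tested under arfs_lock both in the steering entry point and in the worker, so no rule can be created or re-armed once arfs_del_rules() has begun tearing the tables down. The check-under-the-same-lock shape, sketched in userspace:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t arfs_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool arfs_enabled;

    static int steer_flow(void)
    {
        pthread_mutex_lock(&arfs_lock);
        if (!arfs_enabled) {               /* tables may already be gone */
            pthread_mutex_unlock(&arfs_lock);
            return -EPERM;
        }
        /* ... look up or queue the rule while still holding the lock ... */
        pthread_mutex_unlock(&arfs_lock);
        return 0;
    }

    int main(void) { return steer_flow() == -EPERM ? 0 : 1; }
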
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cc51ce16df14..67a29826bb57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -451,6 +451,34 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
+ if (mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc == ETH_RSS_HASH_XOR) {
+ unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+ if (count > xor8_max_channels) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: Requested number of channels (%d) exceeds the maximum allowed by the XOR8 RSS hfunc (%d)\n",
+ __func__, count, xor8_max_channels);
+ goto out;
+ }
+ }
+
+ /* If RXFH is configured, changing the channels number is allowed only if
+ * it does not require resizing the RSS table. This is because the previous
+ * configuration may no longer be compatible with the new RSS table.
+ */
+ if (netif_is_rxfh_configured(priv->netdev)) {
+ int cur_rqt_size = mlx5e_rqt_size(priv->mdev, cur_params->num_channels);
+ int new_rqt_size = mlx5e_rqt_size(priv->mdev, count);
+
+ if (new_rqt_size != cur_rqt_size) {
+ err = -EINVAL;
+ netdev_err(priv->netdev,
+ "%s: RXFH is configured, block changing channels number that affects RSS table size (new: %d, current: %d)\n",
+ __func__, new_rqt_size, cur_rqt_size);
+ goto out;
+ }
+ }
+
/* Don't allow changing the number of channels if HTB offload is active,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
@@ -561,12 +589,12 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
static void
mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int tc;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
+ struct mlx5_core_dev *mdev = c->mdev;
for (tc = 0; tc < c->num_tc; tc++) {
mlx5_core_modify_cq_moderation(mdev,
@@ -580,11 +608,11 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
static void
mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
+ struct mlx5_core_dev *mdev = c->mdev;
mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
coal->rx_coalesce_usecs,
@@ -1281,17 +1309,30 @@ int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
u8 hfunc = rxfh->hfunc;
+ unsigned int count;
int err;
mutex_lock(&priv->state_lock);
+
+ count = priv->channels.params.num_channels;
+
+ if (hfunc == ETH_RSS_HASH_XOR) {
+ unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+ if (count > xor8_max_channels) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
+ __func__, count, xor8_max_channels);
+ goto unlock;
+ }
+ }
+
if (*rss_context && rxfh->rss_delete) {
err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
goto unlock;
}
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- unsigned int count = priv->channels.params.num_channels;
-
err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
if (err)
goto unlock;
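
For reference, the XOR8 cap above comes from MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH = 256 divided by MLX5E_UNIFORM_SPREAD_RQT_FACTOR (defined elsewhere in the driver); assuming a factor of 2, xor8_max_channels works out to 256 / 2 = 128, so a request for more channels while the XOR hash function is active, or an attempt to switch to XOR with more channels already open, fails with -EINVAL.
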
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 91848eae4565..319930c04093 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -209,8 +209,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
*data,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR_OR_NULL(priv->devcom))
- return -EOPNOTSUPP;
+ if (IS_ERR(priv->devcom))
+ return PTR_ERR(priv->devcom);
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -5726,9 +5726,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
- mutex_lock(&priv->state_lock);
mlx5e_selq_cleanup(&priv->selq);
- mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb_max_qos_sqs; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2fa076b23fbe..e21a3b4128ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -398,6 +398,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+ mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
+
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
@@ -496,9 +498,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
- if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
- mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
- be32_to_cpu(eseg->flow_table_metadata));
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
@@ -657,7 +656,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
eseg->flow_table_metadata =
- cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
+ cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 3047d7015c52..1789800faaeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1868,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
+ dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
@@ -1892,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- if (MLX5_ESWITCH_MANAGER(dev) &&
- mlx5_esw_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
- dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
@@ -1908,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
reps_err:
mlx5_esw_vports_cleanup(esw);
+ dev->priv.eswitch = NULL;
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
@@ -1926,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
- esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
@@ -1937,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup(esw);
+ esw->dev->priv.eswitch = NULL;
mlx5_esw_vports_cleanup(esw);
debugfs_remove_recursive(esw->debugfs_root);
devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index baaae628b0a0..844d3e3a65dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -43,6 +43,7 @@
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
+#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
@@ -2476,6 +2477,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
+ if (MLX5_ESWITCH_MANAGER(esw->dev) &&
+ mlx5_esw_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
err = devl_params_register(priv_to_devlink(esw->dev),
esw_devlink_params,
ARRAY_SIZE(esw_devlink_params));
@@ -3055,7 +3060,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
key,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (IS_ERR(esw->devcom))
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3707,6 +3712,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured.");
+ return -EPERM;
+ }
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e6bfa7e4f146..cf085a478e3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
+ struct mlx5_pkt_reformat *p2)
+{
+ return p1->owner == p2->owner &&
+ (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
+ p1->id == p2->id :
+ mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
+ mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+}
+
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2)
{
@@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
(d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
- (d1->vport.pkt_reformat->id ==
- d2->vport.pkt_reformat->id) : true)) ||
+ mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
+ d2->vport.pkt_reformat) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
+ /* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
- if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
+ if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d14459e5c04f..69d482f7c5a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return err;
}
- if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
mlx5_lag_port_sel_destroy(ldev);
+ ldev->buckets = 1;
+ }
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index e7d59cfa8708..7b0766c89f4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -220,7 +220,7 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp *comp;
if (IS_ERR_OR_NULL(devc))
- return NULL;
+ return ERR_PTR(-EINVAL);
mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, key, handler);
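
Returning ERR_PTR(-EINVAL) instead of NULL lets every caller propagate a real errno via PTR_ERR(), which is why the sd.c and main.c hunks below replace their NULL/IS_ERR_OR_NULL checks with plain IS_ERR(). Simplified userspace versions of the kernel's encoding, where an errno is stored in the never-mappable top page of the address space:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline bool IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *register_component(bool bad_arg)
    {
        static int obj;
        if (bad_arg)
            return ERR_PTR(-EINVAL);   /* instead of NULL: caller learns why */
        return &obj;
    }

    int main(void)
    {
        void *c = register_component(true);
        if (IS_ERR(c))
            printf("err = %ld\n", PTR_ERR(c));
        return 0;
    }
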
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index 5b28084e8a03..dd5d186dc614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -213,8 +213,8 @@ static int sd_register(struct mlx5_core_dev *dev)
sd = mlx5_get_sd(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
sd->group_id, NULL, dev);
- if (!devcom)
- return -ENOMEM;
+ if (IS_ERR(devcom))
+ return PTR_ERR(devcom);
sd->devcom = devcom;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c2593625c09a..331ce47f51a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -956,7 +956,7 @@ static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
mlx5_query_nic_system_image_guid(dev),
NULL, dev);
- if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ if (IS_ERR(dev->priv.hca_devcom_comp))
mlx5_core_err(dev, "Failed to register devcom HCA component\n");
}
@@ -1480,6 +1480,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
if (err)
goto err_register;
+ err = mlx5_crdump_enable(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);
+
+ err = mlx5_hwmon_dev_register(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
+
mutex_unlock(&dev->intf_state_mutex);
return 0;
@@ -1505,7 +1513,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
int err;
devl_lock(devlink);
+ devl_register(devlink);
err = mlx5_init_one_devl_locked(dev);
+ if (err)
+ devl_unregister(devlink);
devl_unlock(devlink);
return err;
}
@@ -1517,6 +1528,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
+ mlx5_hwmon_dev_unregister(dev);
+ mlx5_crdump_disable(dev);
mlx5_unregister_device(dev);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1534,6 +1547,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unregister(devlink);
devl_unlock(devlink);
}
@@ -1680,16 +1694,23 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
}
devl_lock(devlink);
+ devl_register(devlink);
+
err = mlx5_devlink_params_register(priv_to_devlink(dev));
- devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
- goto query_hca_caps_err;
+ goto params_reg_err;
}
+ devl_unlock(devlink);
return 0;
+params_reg_err:
+ devl_unregister(devlink);
+ devl_unlock(devlink);
query_hca_caps_err:
+ devl_unregister(devlink);
+ devl_unlock(devlink);
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
@@ -1702,6 +1723,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
devl_lock(devlink);
mlx5_devlink_params_unregister(priv_to_devlink(dev));
+ devl_unregister(devlink);
devl_unlock(devlink);
if (dev->state != MLX5_DEVICE_STATE_UP)
return;
@@ -1943,16 +1965,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_one;
}
- err = mlx5_crdump_enable(dev);
- if (err)
- dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
-
- err = mlx5_hwmon_dev_register(dev);
- if (err)
- mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
-
pci_save_state(pdev);
- devlink_register(devlink);
return 0;
err_init_one:
@@ -1973,16 +1986,9 @@ static void remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
- /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
- * devlink notify APIs.
- * Hence, we must drain them before unregistering the devlink.
- */
mlx5_drain_fw_reset(dev);
mlx5_drain_health_wq(dev);
- devlink_unregister(devlink);
mlx5_sriov_disable(pdev, false);
- mlx5_hwmon_dev_unregister(dev);
- mlx5_crdump_disable(dev);
mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
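
The probe/remove hunks move crdump and hwmon handling down into mlx5_init_one()/mlx5_uninit_one(), so each layer sets up and tears down its own resources symmetrically, and both registrations stay non-fatal (logged only). A generic sketch of that pairing, with hypothetical stand-in functions:

#include <stdio.h>

static int crdump_enable(void)    { puts("crdump on");  return 0; }
static void crdump_disable(void)  { puts("crdump off"); }
static int hwmon_register(void)   { puts("hwmon on");   return 0; }
static void hwmon_unregister(void){ puts("hwmon off"); }

/* init_one() owns both resources; failures are logged, not fatal. */
static int init_one(void)
{
        int err;

        err = crdump_enable();
        if (err)
                fprintf(stderr, "crdump_enable failed: %d\n", err);

        err = hwmon_register();
        if (err)
                fprintf(stderr, "hwmon_register failed: %d\n", err);

        return 0;
}

/* ...and uninit_one() releases them in reverse order. */
static void uninit_one(void)
{
        hwmon_unregister();
        crdump_disable();
}

int main(void)
{
        if (!init_one())
                uninit_one();
        return 0;
}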
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 4dcf995cb1a2..6bac8ad70ba6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -19,6 +19,7 @@
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
+#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
@@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
return;
}
+ vecidx -= MLX5_IRQ_VEC_COMP_BASE;
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}
@@ -585,7 +587,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
- int offset = 1;
+ int offset = MLX5_IRQ_VEC_COMP_BASE;
if (!pool->xa_num_irqs.max)
offset = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index bc863e1f062e..7ebe71280827 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -75,7 +75,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto peer_devlink_set_err;
}
- devlink_register(devlink);
return 0;
peer_devlink_set_err:
@@ -101,7 +100,6 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
devlink = priv_to_devlink(mdev);
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
mlx5_drain_health_wq(mdev);
- devlink_unregister(devlink);
if (mlx5_dev_is_lightweight(mdev))
mlx5_uninit_one_light(mdev);
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 64f4cc284aea..030a5776c937 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -205,12 +205,11 @@ dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size)
}
static int
-dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
+dr_dump_rule_action_mem(struct seq_file *file, char *buff, const u64 rule_id,
struct mlx5dr_rule_action_member *action_mem)
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
int ret;
@@ -488,10 +487,9 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
}
static int
-dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
+dr_dump_rule_mem(struct seq_file *file, char *buff, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
int ret;
@@ -522,7 +520,8 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
}
static int
-dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
+dr_dump_rule_rx_tx(struct seq_file *file, char *buff,
+ struct mlx5dr_rule_rx_tx *rule_rx_tx,
bool is_rx, const u64 rule_id, u8 format_ver)
{
struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
@@ -533,7 +532,7 @@ dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
return 0;
while (i--) {
- ret = dr_dump_rule_mem(file, ste_arr[i], is_rx, rule_id,
+ ret = dr_dump_rule_mem(file, buff, ste_arr[i], is_rx, rule_id,
format_ver);
if (ret < 0)
return ret;
@@ -542,7 +541,8 @@ dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
return 0;
}
-static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
+static noinline_for_stack int
+dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
@@ -565,19 +565,19 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
return ret;
if (rx->nic_matcher) {
- ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
+ ret = dr_dump_rule_rx_tx(file, buff, rx, true, rule_id, format_ver);
if (ret < 0)
return ret;
}
if (tx->nic_matcher) {
- ret = dr_dump_rule_rx_tx(file, tx, false, rule_id, format_ver);
+ ret = dr_dump_rule_rx_tx(file, buff, tx, false, rule_id, format_ver);
if (ret < 0)
return ret;
}
list_for_each_entry(action_mem, &rule->rule_actions_list, list) {
- ret = dr_dump_rule_action_mem(file, rule_id, action_mem);
+ ret = dr_dump_rule_action_mem(file, buff, rule_id, action_mem);
if (ret < 0)
return ret;
}
@@ -586,10 +586,10 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
}
static int
-dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
+dr_dump_matcher_mask(struct seq_file *file, char *buff,
+ struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char dump[DR_HEX_SIZE];
int ret;
@@ -681,10 +681,10 @@ dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
}
static int
-dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
+dr_dump_matcher_builder(struct seq_file *file, char *buff,
+ struct mlx5dr_ste_build *builder,
u32 index, bool is_rx, const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -702,11 +702,10 @@ dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
}
static int
-dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
+dr_dump_matcher_rx_tx(struct seq_file *file, char *buff, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
@@ -731,7 +730,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
- ret = dr_dump_matcher_builder(file,
+ ret = dr_dump_matcher_builder(file, buff,
&matcher_rx_tx->ste_builder[i],
i, is_rx, matcher_id);
if (ret < 0)
@@ -741,7 +740,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
@@ -763,19 +762,19 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
if (ret)
return ret;
- ret = dr_dump_matcher_mask(file, &matcher->mask,
+ ret = dr_dump_matcher_mask(file, buff, &matcher->mask,
matcher->match_criteria, matcher_id);
if (ret < 0)
return ret;
if (rx->nic_tbl) {
- ret = dr_dump_matcher_rx_tx(file, true, rx, matcher_id);
+ ret = dr_dump_matcher_rx_tx(file, buff, true, rx, matcher_id);
if (ret < 0)
return ret;
}
if (tx->nic_tbl) {
- ret = dr_dump_matcher_rx_tx(file, false, tx, matcher_id);
+ ret = dr_dump_matcher_rx_tx(file, buff, false, tx, matcher_id);
if (ret < 0)
return ret;
}
@@ -803,11 +802,10 @@ dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher)
}
static int
-dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
+dr_dump_table_rx_tx(struct seq_file *file, char *buff, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
int ret;
@@ -829,7 +827,8 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
return 0;
}
-static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
+static noinline_for_stack int
+dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
@@ -848,14 +847,14 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
return ret;
if (rx->nic_dmn) {
- ret = dr_dump_table_rx_tx(file, true, rx,
+ ret = dr_dump_table_rx_tx(file, buff, true, rx,
DR_DBG_PTR_TO_ID(table));
if (ret < 0)
return ret;
}
if (tx->nic_dmn) {
- ret = dr_dump_table_rx_tx(file, false, tx,
+ ret = dr_dump_table_rx_tx(file, buff, false, tx,
DR_DBG_PTR_TO_ID(table));
if (ret < 0)
return ret;
@@ -881,10 +880,10 @@ static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl)
}
static int
-dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
+dr_dump_send_ring(struct seq_file *file, char *buff,
+ struct mlx5dr_send_ring *ring,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -902,13 +901,13 @@ dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
return 0;
}
-static noinline_for_stack int
+static int
dr_dump_domain_info_flex_parser(struct seq_file *file,
+ char *buff,
const char *flex_parser_name,
const u8 flex_parser_value,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
@@ -925,11 +924,11 @@ dr_dump_domain_info_flex_parser(struct seq_file *file,
return 0;
}
-static noinline_for_stack int
-dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
+static int
+dr_dump_domain_info_caps(struct seq_file *file, char *buff,
+ struct mlx5dr_cmd_caps *caps,
const u64 domain_id)
{
- char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_cmd_vport_cap *vport_caps;
unsigned long i, vports_num;
int ret;
@@ -969,34 +968,35 @@ dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
}
static int
-dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
+dr_dump_domain_info(struct seq_file *file, char *buff,
+ struct mlx5dr_domain_info *info,
const u64 domain_id)
{
int ret;
- ret = dr_dump_domain_info_caps(file, &info->caps, domain_id);
+ ret = dr_dump_domain_info_caps(file, buff, &info->caps, domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmp_dw0",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw0",
info->caps.flex_parser_id_icmp_dw0,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmp_dw1",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw1",
info->caps.flex_parser_id_icmp_dw1,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw0",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw0",
info->caps.flex_parser_id_icmpv6_dw0,
domain_id);
if (ret < 0)
return ret;
- ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw1",
+ ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw1",
info->caps.flex_parser_id_icmpv6_dw1,
domain_id);
if (ret < 0)
@@ -1032,12 +1032,12 @@ dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
if (ret)
return ret;
- ret = dr_dump_domain_info(file, &dmn->info, domain_id);
+ ret = dr_dump_domain_info(file, buff, &dmn->info, domain_id);
if (ret < 0)
return ret;
if (dmn->info.supp_sw_steering) {
- ret = dr_dump_send_ring(file, dmn->send_ring, domain_id);
+ ret = dr_dump_send_ring(file, buff, dmn->send_ring, domain_id);
if (ret < 0)
return ret;
}
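
The dr_dbg.c changes all follow one pattern: the MLX5DR_DEBUG_DUMP_BUFF_LENGTH scratch buffer moves out of every helper's stack frame and is threaded through as a parameter, while the top-level dump entry points are marked noinline_for_stack so only one frame carries it. A condensed sketch of the idea (illustrative names):

#include <stdio.h>

#define DUMP_BUFF_LEN 1024

static int dump_leaf(FILE *f, char *buff, int id)
{
        int n = snprintf(buff, DUMP_BUFF_LEN, "leaf,%d\n", id);

        if (n < 0 || fwrite(buff, 1, (size_t)n, f) != (size_t)n)
                return -1;
        return 0;
}

static int dump_tree(FILE *f, int nleaves)
{
        char buff[DUMP_BUFF_LEN];       /* one scratch buffer for the whole walk */

        for (int i = 0; i < nleaves; i++)
                if (dump_leaf(f, buff, i))
                        return -1;
        return 0;
}

int main(void)
{
        return dump_tree(stdout, 3) ? 1 : 0;
}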
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e4d7739bd7c8..4a79c0d7e7ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -849,7 +849,7 @@ free_skb:
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
- EMAD, DISCARD);
+ EMAD, FORWARD);
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 53b150b7ae4e..6c06b0592760 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -1357,24 +1357,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
.got_inactive = mlxsw_env_got_inactive,
};
-static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
{
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool mcia_128b_supported;
+ bool mcia_128b_supported = false;
int err;
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
- &mcia_128b_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+ &mcia_128b_supported);
mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
-
- return 0;
}
int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
@@ -1445,15 +1441,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_type_set;
- err = mlxsw_env_max_module_eeprom_len_query(env);
- if (err)
- goto err_eeprom_len_query;
-
+ mlxsw_env_max_module_eeprom_len_query(env);
env->line_cards[0]->active = true;
return 0;
-err_eeprom_len_query:
err_type_set:
mlxsw_env_module_event_disable(env, 0);
err_mlxsw_env_module_event_enable:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index af99bf17eb36..f42a1b1c9368 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1530,7 +1530,7 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool pci_reset_supported;
+ bool pci_reset_supported = false;
u32 sys_status;
int err;
@@ -1548,11 +1548,9 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
- &pci_reset_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+ &pci_reset_supported);
if (pci_reset_supported) {
pci_dbg(pdev, "Starting PCI reset flow\n");
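
Both mlxsw hunks apply the same best-effort pattern: the capability flag defaults to false, the MCAM query result is unpacked only on success, and a query failure no longer aborts initialization. A sketch of that shape with a stand-in query:

#include <stdio.h>
#include <stdbool.h>

static int query_hw(int *reg)
{
        *reg = 1;       /* pretend the capability bit reads back as set */
        return 0;       /* negative errno if the register is unsupported */
}

int main(void)
{
        bool cap_supported = false;     /* safe default if the query fails */
        int reg, err;

        err = query_hw(&reg);
        if (!err)
                cap_supported = reg & 1;        /* unpack only on success */

        /* Initialization continues either way; the feature just stays off. */
        printf("capability %s\n", cap_supported ? "enabled" : "disabled");
        return 0;
}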
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index f20052776b3f..92a406f02eae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/idr.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -58,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
- if (id < tcam->max_regions) {
- __set_bit(id, tcam->used_regions);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_regions);
+ ida_free(&tcam->used_regions, id);
}
static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
- if (id < tcam->max_groups) {
- __set_bit(id, tcam->used_groups);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_groups);
+ ida_free(&tcam->used_groups, id);
}
struct mlxsw_sp_acl_tcam_pattern {
@@ -715,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+ mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+ mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash ran out of credits, so it was interrupted.
* Schedule the work as soon as possible to continue.
@@ -726,6 +731,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
}
static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ /* The entry markers are relative to the current chunk and therefore
+ * need to be reset together with the chunk marker.
+ */
+ ctx->current_vchunk = NULL;
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = NULL;
+}
+
+static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
@@ -747,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
- vregion->rehash.ctx.current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@@ -820,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
- cancel_delayed_work_sync(&vregion->rehash.dw);
+ if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+ ctx->hints_priv)
+ ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@@ -1154,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
- ventry->entry, activity);
+ struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+ int err;
+
+ mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+ activity);
+ mutex_unlock(&vregion->lock);
+ return err;
}
static int
@@ -1189,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+ WARN_ON(vchunk->chunk2);
+
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@@ -1207,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@@ -1230,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+ if (list_empty(&vchunk->ventry_list))
+ goto out;
+
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@@ -1239,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
+ WARN_ON(ventry->vchunk != vchunk);
+
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@@ -1279,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
+out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@@ -1292,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
+ if (list_empty(&vregion->vchunk_list))
+ return 0;
+
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@@ -1320,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
- mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
+ if (ctx->this_is_rollback)
+ return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@@ -1340,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
/* Let the rollback to be continued later on. */
}
}
- mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@@ -1389,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@@ -1441,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ return;
}
if (*credits >= 0)
@@ -1549,19 +1588,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
if (max_tcam_regions < max_regions)
max_regions = max_tcam_regions;
- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
- if (!tcam->used_regions) {
- err = -ENOMEM;
- goto err_alloc_used_regions;
- }
+ ida_init(&tcam->used_regions);
tcam->max_regions = max_regions;
max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
- if (!tcam->used_groups) {
- err = -ENOMEM;
- goto err_alloc_used_groups;
- }
+ ida_init(&tcam->used_groups);
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
@@ -1575,10 +1606,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_tcam_init:
- bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
- bitmap_free(tcam->used_regions);
-err_alloc_used_regions:
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
err_rehash_params_register:
mutex_destroy(&tcam->lock);
@@ -1591,8 +1620,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->fini(mlxsw_sp, tcam->priv);
- bitmap_free(tcam->used_groups);
- bitmap_free(tcam->used_regions);
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
mutex_destroy(&tcam->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 462bf448497d..79a1d8606512 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -6,15 +6,16 @@
#include <linux/list.h>
#include <linux/parman.h>
+#include <linux/idr.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
+ struct ida used_regions;
unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
+ struct ida used_groups;
unsigned int max_groups;
unsigned int max_group_size;
struct mutex lock; /* guards vregion list */
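
The TCAM allocator switch replaces two preallocated bitmaps with IDAs, which keeps the lowest-free-ID semantics while dropping the fixed-size allocations and their error paths. A userspace stand-in with the same call shape as ida_alloc_max()/ida_free(); the real driver uses the growable IDA from <linux/idr.h>:

#include <stdio.h>
#include <string.h>

#define MAX_IDS 64

struct ida_lite {
        unsigned char used[MAX_IDS];
};

/* Return the lowest free ID in [0, max], or -1 (kernel: -ENOSPC). */
static int ida_lite_alloc_max(struct ida_lite *ida, int max)
{
        for (int id = 0; id <= max && id < MAX_IDS; id++) {
                if (!ida->used[id]) {
                        ida->used[id] = 1;
                        return id;
                }
        }
        return -1;
}

static void ida_lite_free(struct ida_lite *ida, int id)
{
        ida->used[id] = 0;
}

int main(void)
{
        struct ida_lite regions;

        memset(&regions, 0, sizeof(regions));

        int a = ida_lite_alloc_max(&regions, 15);
        int b = ida_lite_alloc_max(&regions, 15);

        printf("got ids %d, %d\n", a, b);       /* 0, 1 */
        ida_lite_free(&regions, a);
        printf("reuse: %d\n", ida_lite_alloc_max(&regions, 15)); /* 0 again */
        return 0;
}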
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e5ec0a363aff..31f75b4a67fd 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -368,7 +368,6 @@ union ks8851_tx_hdr {
* @rdfifo: FIFO read callback
* @wrfifo: FIFO write callback
* @start_xmit: start_xmit() implementation callback
- * @rx_skb: rx_skb() implementation callback
* @flush_tx_work: flush_tx_work() implementation callback
*
* The @statelock is used to protect information in the structure which may
@@ -423,8 +422,6 @@ struct ks8851_net {
struct sk_buff *txp, bool irq);
netdev_tx_t (*start_xmit)(struct sk_buff *skb,
struct net_device *dev);
- void (*rx_skb)(struct ks8851_net *ks,
- struct sk_buff *skb);
void (*flush_tx_work)(struct ks8851_net *ks);
};
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 0bf13b38b8f5..502518cdb461 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -232,24 +232,15 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
}
/**
- * ks8851_rx_skb - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
-{
- ks->rx_skb(ks, skb);
-}
-
-/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
+ * @rxq: Queue of packets received in this function.
*
* This is called from the IRQ work queue when the system detects that there
* are packets in the receive queue. Find out how many packets there are and
* read them from the FIFO.
*/
-static void ks8851_rx_pkts(struct ks8851_net *ks)
+static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq)
{
struct sk_buff *skb;
unsigned rxfc;
@@ -309,7 +300,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- ks8851_rx_skb(ks, skb);
+ __skb_queue_tail(rxq, skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -336,9 +327,11 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
static irqreturn_t ks8851_irq(int irq, void *_ks)
{
struct ks8851_net *ks = _ks;
+ struct sk_buff_head rxq;
unsigned handled = 0;
unsigned long flags;
unsigned int status;
+ struct sk_buff *skb;
ks8851_lock(ks, &flags);
@@ -392,7 +385,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
* from the device so do not bother masking just the RX
* from the device. */
- ks8851_rx_pkts(ks);
+ __skb_queue_head_init(&rxq);
+ ks8851_rx_pkts(ks, &rxq);
}
/* if something stopped the rx process, probably due to wanting
@@ -416,6 +410,10 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
+ if (status & IRQ_RXI)
+ while ((skb = __skb_dequeue(&rxq)))
+ netif_rx(skb);
+
return IRQ_HANDLED;
}
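
The ks8851 rework drops the per-bus rx_skb callback: the IRQ handler now collects packets on an on-stack queue while the device lock is held and calls netif_rx() only after the lock is released. A userspace sketch of that collect-then-deliver split (toy queue, not sk_buff_head):

#include <stdio.h>

#define MAX_PKTS 8

struct pkt { int id; };

struct pktq {
        struct pkt *slot[MAX_PKTS];
        int head, tail;
};

static void q_init(struct pktq *q) { q->head = q->tail = 0; }
static void q_push(struct pktq *q, struct pkt *p) { q->slot[q->tail++] = p; }
static struct pkt *q_pop(struct pktq *q)
{
        return q->head < q->tail ? q->slot[q->head++] : NULL;
}

static void netif_rx_stub(struct pkt *p) { printf("deliver pkt %d\n", p->id); }

int main(void)
{
        struct pkt pkts[3] = { {0}, {1}, {2} };
        struct pktq rxq;
        struct pkt *p;

        q_init(&rxq);

        /* --- critical section: drain the RX FIFO onto a local queue --- */
        for (int i = 0; i < 3; i++)
                q_push(&rxq, &pkts[i]);
        /* --- device lock dropped here --- */

        while ((p = q_pop(&rxq)))
                netif_rx_stub(p);       /* stack sees packets outside the lock */
        return 0;
}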
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2a7f29854267..381b9cd285eb 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -210,16 +210,6 @@ static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
}
-/**
- * ks8851_rx_skb_par - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
{
return ks8851_rdreg16_par(ks, KS_TXQCR);
@@ -298,7 +288,6 @@ static int ks8851_probe_par(struct platform_device *pdev)
ks->rdfifo = ks8851_rdfifo_par;
ks->wrfifo = ks8851_wrfifo_par;
ks->start_xmit = ks8851_start_xmit_par;
- ks->rx_skb = ks8851_rx_skb_par;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
IRQ_RXI | /* RX done */ \
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 2f803377c9f9..670c1de966db 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -299,16 +299,6 @@ static unsigned int calc_txlen(unsigned int len)
}
/**
- * ks8851_rx_skb_spi - receive skbuff
- * @ks: The device state
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
-/**
* ks8851_tx_work - process tx packet(s)
* @work: The work structure that was scheduled.
*
@@ -435,7 +425,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
ks->rdfifo = ks8851_rdfifo_spi;
ks->wrfifo = ks8851_wrfifo_spi;
ks->start_xmit = ks8851_start_xmit_spi;
- ks->rx_skb = ks8851_rx_skb_spi;
ks->flush_tx_work = ks8851_flush_tx_work_spi;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 3a1b1a1f5a19..60dd2fd603a8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -731,7 +731,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
bool sgmii = false, inband_aneg = false;
int err;
- if (port->conf.inband) {
+ if (conf->inband) {
if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
conf->portmode == PHY_INTERFACE_MODE_QSGMII)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
@@ -948,7 +948,7 @@ int sparx5_port_pcs_set(struct sparx5 *sparx5,
if (err)
return -EINVAL;
- if (port->conf.inband) {
+ if (conf->inband) {
/* Enable/disable 1G counters in ASM */
spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
ASM_PORT_CFG_CSC_STAT_DIS,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 523e0c470894..55f255a3c9db 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
u16 l3_proto; /* protocol specified in the template */
};
+/* SparX-5 VCAP fragment types:
+ * 0 = no fragment, 1 = initial fragment,
+ * 2 = suspicious fragment, 3 = valid follow-up fragment
+ */
+enum { /* key / mask */
+ FRAG_NOT = 0x03, /* 0 / 3 */
+ FRAG_SOME = 0x11, /* 1 / 1 */
+ FRAG_FIRST = 0x13, /* 1 / 3 */
+ FRAG_LATER = 0x33, /* 3 / 3 */
+ FRAG_INVAL = 0xff, /* invalid */
+};
+
+/* Flower fragment flag to VCAP fragment type mapping */
+static const u8 sparx5_vcap_frag_map[4][4] = { /* is_frag */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
+ { FRAG_NOT, FRAG_NOT, FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
+ { FRAG_SOME, FRAG_LATER, FRAG_INVAL, FRAG_FIRST } /* 1/1 */
+ /* 0/0 0/1 1/0 1/1 <-- first_frag */
+};
+
static int
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
@@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
flow_rule_match_control(st->frule, &mt);
if (mt.mask->flags) {
- if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
- if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
- value = 1; /* initial fragment */
- mask = 0x3;
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
+ u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
+
+ u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
+
+ /* Lookup verdict based on the 2 + 2 input bits */
+ u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
+
+ if (vdt == FRAG_INVAL) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "Match on invalid fragment flag combination");
+ return -EINVAL;
}
+ /* Extract VCAP fragment key and mask from verdict */
+ value = (vdt >> 4) & 0x3;
+ mask = vdt & 0x3;
+
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_FRAGMENT_TYPE,
value, mask);
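
The rewritten control-key handler collapses the nested fragment-flag conditionals into one 4x4 lookup: two bits index each dimension (key << 1 | mask, for IS_FRAGMENT and FIRST_FRAG respectively), and the verdict byte packs the VCAP key in its high nibble and the mask in its low nibble. A runnable decode of the same table:

#include <stdio.h>
#include <stdint.h>

enum { FRAG_NOT = 0x03, FRAG_SOME = 0x11, FRAG_FIRST = 0x13,
       FRAG_LATER = 0x33, FRAG_INVAL = 0xff };

static const uint8_t frag_map[4][4] = {
        { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST },
        { FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL },
        { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL },
        { FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST },
};

int main(void)
{
        /* e.g. key says "is a fragment", mask matches only IS_FRAGMENT */
        int is_frag_key = 1, is_frag_mask = 1;
        int first_key = 0, first_mask = 1;

        uint8_t vdt = frag_map[(is_frag_key << 1) | is_frag_mask]
                              [(first_key << 1) | first_mask];

        if (vdt == FRAG_INVAL) {
                puts("invalid fragment flag combination");
                return 1;
        }
        printf("VCAP key=%u mask=%u\n", (vdt >> 4) & 3, vdt & 3); /* 3 / 3 */
        return 0;
}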
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index a5ac21a0ee33..cb6b33a228ea 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1868,8 +1868,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f)
{
struct qede_arfs_fltr_node *n;
- int min_hlen, rc = -EINVAL;
struct qede_arfs_tuple t;
+ int min_hlen, rc;
__qede_lock(edev);
@@ -1879,7 +1879,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse flower attribute and prepare filter */
- if (qede_parse_flow_attr(edev, proto, f->rule, &t))
+ rc = qede_parse_flow_attr(edev, proto, f->rule, &t);
+ if (rc)
goto unlock;
/* Validate profile mode and number of filters */
@@ -1888,11 +1889,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
DP_NOTICE(edev,
"Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ rc = -EINVAL;
goto unlock;
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
+ rc = qede_parse_actions(edev, &f->rule->action, f->common.extack);
+ if (rc)
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -1998,10 +2001,9 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
- err = -EINVAL;
+ err = qede_parse_flow_attr(edev, proto, flow->rule, t);
+ if (err)
goto err_out;
- }
/* Make sure location is valid and filter isn't already set */
err = qede_flow_spec_validate(edev, &flow->rule->action, t,
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 4c043052198d..00882ffc7a02 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -73,6 +73,7 @@ enum mac_version {
};
struct rtl8169_private;
+struct r8169_led_classdev;
void r8169_apply_firmware(struct rtl8169_private *tp);
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
@@ -84,7 +85,8 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
char *buf, int buf_len);
int rtl8168_get_led_mode(struct rtl8169_private *tp);
int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
-void rtl8168_init_leds(struct net_device *ndev);
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev);
int rtl8125_get_led_mode(struct rtl8169_private *tp, int index);
int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode);
-void rtl8125_init_leds(struct net_device *ndev);
+struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev);
+void r8169_remove_leds(struct r8169_led_classdev *leds);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
index 7c5dc9d0df85..e10bee706bc6 100644
--- a/drivers/net/ethernet/realtek/r8169_leds.c
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -146,22 +146,22 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
/* ignore errors */
- devm_led_classdev_register(&ndev->dev, led_cdev);
+ led_classdev_register(&ndev->dev, led_cdev);
}
-void rtl8168_init_leds(struct net_device *ndev)
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev)
{
- /* bind resource mgmt to netdev */
- struct device *dev = &ndev->dev;
struct r8169_led_classdev *leds;
int i;
- leds = devm_kcalloc(dev, RTL8168_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(RTL8168_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
if (!leds)
- return;
+ return NULL;
for (i = 0; i < RTL8168_NUM_LEDS; i++)
rtl8168_setup_ldev(leds + i, ndev, i);
+
+ return leds;
}
static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev,
@@ -245,20 +245,31 @@ static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev,
led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
/* ignore errors */
- devm_led_classdev_register(&ndev->dev, led_cdev);
+ led_classdev_register(&ndev->dev, led_cdev);
}
-void rtl8125_init_leds(struct net_device *ndev)
+struct r8169_led_classdev *rtl8125_init_leds(struct net_device *ndev)
{
- /* bind resource mgmt to netdev */
- struct device *dev = &ndev->dev;
struct r8169_led_classdev *leds;
int i;
- leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ leds = kcalloc(RTL8125_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
if (!leds)
- return;
+ return NULL;
for (i = 0; i < RTL8125_NUM_LEDS; i++)
rtl8125_setup_led_ldev(leds + i, ndev, i);
+
+ return leds;
+}
+
+void r8169_remove_leds(struct r8169_led_classdev *leds)
+{
+ if (!leds)
+ return;
+
+ for (struct r8169_led_classdev *l = leds; l->ndev; l++)
+ led_classdev_unregister(&l->led);
+
+ kfree(leds);
}
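
The LED lifetime change allocates one extra zeroed array slot as a terminator, registers each classdev without devm, and lets r8169_remove_leds() walk the array until the sentinel before freeing it. A sketch of the sentinel-terminated teardown (illustrative struct):

#include <stdio.h>
#include <stdlib.h>

struct led {
        const char *ndev;       /* non-NULL marks a registered entry */
};

static struct led *init_leds(int n)
{
        struct led *leds = calloc(n + 1, sizeof(*leds)); /* +1: sentinel */

        if (!leds)
                return NULL;
        for (int i = 0; i < n; i++)
                leds[i].ndev = "eth0";
        return leds;
}

static void remove_leds(struct led *leds)
{
        if (!leds)
                return;
        for (struct led *l = leds; l->ndev; l++)
                printf("unregister led %p\n", (void *)l);
        free(leds);
}

int main(void)
{
        remove_leds(init_leds(2));
        return 0;
}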
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 6f1e6f386b7b..0fc5fe564ae5 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -647,6 +647,8 @@ struct rtl8169_private {
const char *fw_name;
struct rtl_fw *rtl_fw;
+ struct r8169_led_classdev *leds;
+
u32 ocp_base;
};
@@ -5044,6 +5046,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->wk.work);
+ if (IS_ENABLED(CONFIG_R8169_LEDS))
+ r8169_remove_leds(tp->leds);
+
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)
@@ -5501,9 +5506,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ENABLED(CONFIG_R8169_LEDS)) {
if (rtl_is_8125(tp))
- rtl8125_init_leds(dev);
+ tp->leds = rtl8125_init_leds(dev);
else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
- rtl8168_init_leds(dev);
+ tp->leds = rtl8168_init_leds(dev);
}
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ba01c8cc3c90..9b1f639f64a1 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -769,25 +769,28 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
- u16 pkt_len;
+ u16 desc_len;
u8 die_dt;
int entry;
int limit;
int i;
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- desc = &priv->rx_ring[q].desc[entry];
- for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
+
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
- pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+ desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
/* We use 0-byte descriptors to mark the DMA mapping errors */
- if (!pkt_len)
+ if (!desc_len)
continue;
if (desc_status & MSC_MC)
@@ -808,25 +811,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
switch (die_dt) {
case DT_FSINGLE:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(skb, pkt_len);
+ skb_put(skb, desc_len);
skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
- stats->rx_bytes += pkt_len;
+ stats->rx_bytes += desc_len;
break;
case DT_FSTART:
priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(priv->rx_1st_skb, pkt_len);
+ skb_put(priv->rx_1st_skb, desc_len);
break;
case DT_FMID:
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
+ desc_len);
+ skb_put(priv->rx_1st_skb, desc_len);
dev_kfree_skb(skb);
break;
case DT_FEND:
@@ -834,23 +837,20 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb_copy_to_linear_data_offset(priv->rx_1st_skb,
priv->rx_1st_skb->len,
skb->data,
- pkt_len);
- skb_put(priv->rx_1st_skb, pkt_len);
+ desc_len);
+ skb_put(priv->rx_1st_skb, desc_len);
dev_kfree_skb(skb);
priv->rx_1st_skb->protocol =
eth_type_trans(priv->rx_1st_skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(skb);
+ ravb_rx_csum_gbeth(priv->rx_1st_skb);
+ stats->rx_bytes += priv->rx_1st_skb->len;
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
rx_packets++;
- stats->rx_bytes += pkt_len;
break;
}
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].desc[entry];
}
/* Refill the RX ring buffers. */
@@ -891,30 +891,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
- priv->cur_rx[q];
struct net_device_stats *stats = &priv->stats[q];
struct ravb_ex_rx_desc *desc;
+ unsigned int limit, i;
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
+ int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
- int limit;
+ int entry;
+
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
- desc = &priv->rx_ring[q].ex_desc[entry];
- while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -960,12 +959,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum(skb);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].ex_desc[entry];
}
/* Refill the RX ring buffers. */
@@ -995,9 +991,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
@@ -2726,19 +2722,18 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
struct platform_device *pdev = priv->pdev;
struct net_device *ndev = priv->ndev;
struct device *dev = &pdev->dev;
- const char *dev_name;
+ const char *devname = dev_name(dev);
unsigned long flags;
int error, irq_num;
if (irq_name) {
- dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!dev_name)
+ devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
+ if (!devname)
return -ENOMEM;
irq_num = platform_get_irq_byname(pdev, irq_name);
flags = 0;
} else {
- dev_name = ndev->name;
irq_num = platform_get_irq(pdev, 0);
flags = IRQF_SHARED;
}
@@ -2748,9 +2743,9 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
if (irq)
*irq = irq_num;
- error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+ error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+ netdev_err(ndev, "cannot request IRQ %s\n", devname);
return error;
}
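
Both ravb RX paths are reshaped the same way: a single bounded for-loop recomputes the ring entry each iteration, stops when the quota is consumed or an empty descriptor is reached, and reports the consumed quota back to the NAPI caller. A toy version of the loop skeleton with a made-up ring:

#include <stdio.h>

#define RING 8

int main(void)
{
        int ring[RING] = { 1, 1, 1, 0 };        /* 1 = full, 0 = DT_FEMPTY */
        unsigned int cur = 0, dirty = 0;
        int quota = 2, rx_packets = 0;

        unsigned int limit = dirty + RING - cur;
        for (unsigned int i = 0; i < limit; i++, cur++) {
                unsigned int entry = cur % RING;

                if (rx_packets == quota || !ring[entry])
                        break;
                ring[entry] = 0;                /* consume the descriptor */
                rx_packets++;
        }

        quota -= rx_packets;
        printf("rx=%d, quota left=%d\n", rx_packets, quota);
        return 0;
}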
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index a6fefe675ef1..3b7d4ac1e7be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -553,6 +553,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
+ u32 caps;
u32 speed_mask;
u32 speed10;
u32 speed100;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index b21d99faa2d0..e1b761dcfa1d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
/* The loopback bit seems to be re-set when the link changes.
* Simply mask it each time.
* Speed 10/100/1000 are set in BIT(2)/BIT(3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 3927609abc44..8555299443f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed10 = GMAC_CONTROL_PS;
mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index a6e8d7bd9588..7667d103cd0e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
dev_info(priv->device, "\tDWMAC100\n");
mac->pcsr = priv->ioaddr;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed10 = 0;
mac->link.speed100 = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index cef25efbdff9..a38226d7cc6a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -70,7 +70,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
{
- priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ else
+ priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
}
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -1378,6 +1381,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
mac->link.duplex = GMAC_CONFIG_DM;
mac->link.speed10 = GMAC_CONFIG_PS;
mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index e841e312077e..f8e7775bb633 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
-static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
-{
- priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
- MAC_10000FD | MAC_25000FD |
- MAC_40000FD | MAC_50000FD |
- MAC_100000FD;
-}
-
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -1540,7 +1532,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1601,7 +1592,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
const struct stmmac_ops dwxlgmac2_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1661,6 +1651,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1698,6 +1691,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD | MAC_25000FD |
+ MAC_40000FD | MAC_50000FD |
+ MAC_100000FD;
mac->link.duplex = 0;
mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index dff02d75d519..5d1ea3e07459 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -52,6 +52,7 @@ struct stmmac_counters {
unsigned int mmc_tx_excessdef;
unsigned int mmc_tx_pause_frame;
unsigned int mmc_tx_vlan_frame_g;
+ unsigned int mmc_tx_oversize_g;
unsigned int mmc_tx_lpi_usec;
unsigned int mmc_tx_lpi_tran;
@@ -80,6 +81,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fifo_overflow;
unsigned int mmc_rx_vlan_frames_gb;
unsigned int mmc_rx_watchdog_error;
+ unsigned int mmc_rx_error;
unsigned int mmc_rx_lpi_usec;
unsigned int mmc_rx_lpi_tran;
unsigned int mmc_rx_discard_frames_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 7eb477faa75a..0fab842902a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -53,6 +53,7 @@
#define MMC_TX_EXCESSDEF 0x6c
#define MMC_TX_PAUSE_FRAME 0x70
#define MMC_TX_VLAN_FRAME_G 0x74
+#define MMC_TX_OVERSIZE_G 0x78
/* MMC RX counter registers */
#define MMC_RX_FRAMECOUNT_GB 0x80
@@ -79,6 +80,13 @@
#define MMC_RX_FIFO_OVERFLOW 0xd4
#define MMC_RX_VLAN_FRAMES_GB 0xd8
#define MMC_RX_WATCHDOG_ERROR 0xdc
+#define MMC_RX_ERROR 0xe0
+
+#define MMC_TX_LPI_USEC 0xec
+#define MMC_TX_LPI_TRAN 0xf0
+#define MMC_RX_LPI_USEC 0xf4
+#define MMC_RX_LPI_TRAN 0xf8
+
/* IPC*/
#define MMC_RX_IPC_INTR_MASK 0x100
#define MMC_RX_IPC_INTR 0x108
@@ -283,6 +291,9 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
+ mmc->mmc_tx_oversize_g += readl(mmcaddr + MMC_TX_OVERSIZE_G);
+ mmc->mmc_tx_lpi_usec += readl(mmcaddr + MMC_TX_LPI_USEC);
+ mmc->mmc_tx_lpi_tran += readl(mmcaddr + MMC_TX_LPI_TRAN);
/* MMC RX counter registers */
mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
@@ -316,6 +327,10 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
+ mmc->mmc_rx_error += readl(mmcaddr + MMC_RX_ERROR);
+ mmc->mmc_rx_lpi_usec += readl(mmcaddr + MMC_RX_LPI_USEC);
+ mmc->mmc_rx_lpi_tran += readl(mmcaddr + MMC_RX_LPI_TRAN);
+
/* IPv4 */
mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e1537a57815f..542e2633a6f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -212,6 +212,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_tx_excessdef),
STMMAC_MMC_STAT(mmc_tx_pause_frame),
STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_tx_oversize_g),
STMMAC_MMC_STAT(mmc_tx_lpi_usec),
STMMAC_MMC_STAT(mmc_tx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_framecount_gb),
@@ -238,6 +239,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_error),
STMMAC_MMC_STAT(mmc_rx_lpi_usec),
STMMAC_MMC_STAT(mmc_rx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 24cd80490d19..7c6fb14b5555 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1198,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
-static void stmmac_set_half_duplex(struct stmmac_priv *priv)
-{
- /* Half-Duplex can only work with single tx queue */
- if (priv->plat->tx_queues_to_use > 1)
- priv->phylink_config.mac_capabilities &=
- ~(MAC_10HD | MAC_100HD | MAC_1000HD);
- else
- priv->phylink_config.mac_capabilities |=
- (MAC_10HD | MAC_100HD | MAC_1000HD);
-}
-
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1236,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
xpcs_get_interfaces(priv->hw->xpcs,
priv->phylink_config.supported_interfaces);
- priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10FD | MAC_100FD |
- MAC_1000FD;
-
- stmmac_set_half_duplex(priv);
-
/* Get the MAC specific capabilities */
stmmac_mac_phylink_get_caps(priv);
+ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
max_speed = priv->plat->max_speed;
if (max_speed)
phylink_limit_mac_speed(&priv->phylink_config, max_speed);
@@ -7342,6 +7327,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret = 0, i;
+ int max_speed;
if (netif_running(dev))
stmmac_release(dev);
@@ -7355,7 +7341,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
- stmmac_set_half_duplex(priv);
+ stmmac_mac_phylink_get_caps(priv);
+
+ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
+ max_speed = priv->plat->max_speed;
+ if (max_speed)
+ phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+
stmmac_napi_add(dev);
if (netif_running(dev))
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2939a21ca74f..1d00e21808c1 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2793,6 +2793,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
+ struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
int ret = 0, i;
@@ -2805,6 +2807,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
if (ret)
return ret;
+ /* The DMA channels are not guaranteed to be in a clean state.
+ * Reset and disable them to ensure that they are back to a
+ * clean state and ready to be used.
+ */
+ for (i = 0; i < common->tx_ch_num; i++) {
+ k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
+ am65_cpsw_nuss_rx_cleanup, !!i);
+
+ k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
+
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c66618d91c28..f89716b1cfb6 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -784,6 +784,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
+ if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
+ ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
+ (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
+ mtype_seqid = skb_cb->skb_mtype_seqid;
+
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;
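
The hunk above makes PTPv1 TX timestamps match on the sequence ID alone, since some hardware reports a different message type than the one saved at timestamp request time. A minimal standalone C sketch of that masked comparison; the mask value here is illustrative, standing in for AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK:

#include <stdint.h>
#include <stdio.h>

#define SEQUENCE_ID_MASK 0xffffu	/* illustrative stand-in */

/* PTPv1 path: the hardware may report a different message type than
 * the one recorded when the timestamp was requested, so fall back to
 * matching on the sequence ID alone. */
static int tx_ts_matches(uint32_t hw, uint32_t saved, int is_ptp_v1)
{
	if (is_ptp_v1)
		return (hw & SEQUENCE_ID_MASK) == (saved & SEQUENCE_ID_MASK);
	return hw == saved;
}

int main(void)
{
	/* Saved 0x00010001, hardware reports 0x00100001: same sequence
	 * ID 0x0001, different message-type bits. */
	printf("%d\n", tx_ts_matches(0x00100001, 0x00010001, 1)); /* 1 */
	printf("%d\n", tx_ts_matches(0x00100001, 0x00010001, 0)); /* 0 */
	return 0;
}
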
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index cf7b73f8f450..b69af69a1ccd 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -421,12 +421,14 @@ static int prueth_init_rx_chns(struct prueth_emac *emac,
if (!i)
fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
- rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (rx_chn->irq[i] <= 0) {
- ret = rx_chn->irq[i];
+ ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (ret <= 0) {
+ if (!ret)
+ ret = -ENXIO;
netdev_err(ndev, "Failed to get rx dma irq");
goto fail;
}
+ rx_chn->irq[i] = ret;
}
return 0;
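
The prueth fix above accounts for the IRQ getter returning 0 as well as a negative errno: 0 must be normalized to a real error rather than stored as a valid IRQ. A hedged standalone sketch of the pattern, where get_irq() is a stub and -6 stands in for -ENXIO:

#include <stdio.h>

static int get_irq(int flow)
{
	return flow == 1 ? 0 : 10 + flow;	/* simulate one bad flow */
}

int main(void)
{
	int irq[4];

	for (int i = 0; i < 4; i++) {
		int ret = get_irq(i);

		if (ret <= 0) {
			if (!ret)
				ret = -6;	/* normalize 0 to an errno */
			printf("flow %d: failed (%d)\n", i, ret);
			return 1;
		}
		irq[i] = ret;		/* store only a verified IRQ */
		printf("flow %d: irq %d\n", i, irq[i]);
	}
	return 0;
}
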
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 6dff2c85682d..6fae161cbcb8 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1598,7 +1598,7 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = {0, };
+ struct irq_affinity affd = { .pre_vectors = 1 };
int nvecs, i;
/* We start by asking for one vector per queue pair */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 2fa511227eac..93295916b1d2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -20,8 +20,6 @@
#include "txgbe_phy.h"
#include "txgbe_hw.h"
-#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"
-
static int txgbe_swnodes_register(struct txgbe *txgbe)
{
struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -573,8 +571,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "%s.%d",
- TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
+ snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
+ pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
if (IS_ERR(clk))
@@ -636,7 +634,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)
info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
- info.name = TXGBE_I2C_CLK_DEV_NAME;
+ info.name = "i2c_designware";
info.id = pci_dev_id(pdev);
info.res = &DEFINE_RES_IRQ(pdev->irq);
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 5fbe33a09bb0..324b34f3ac93 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -156,7 +156,6 @@ static void fjes_acpi_remove(struct acpi_device *device)
static struct acpi_driver fjes_acpi_driver = {
.name = DRV_NAME,
.class = DRV_NAME,
- .owner = THIS_MODULE,
.ids = fjes_acpi_ids,
.ops = {
.add = fjes_acpi_add,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 2f6739fe78af..6c2835086b57 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -822,7 +822,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs4)
@@ -929,7 +929,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs6)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ba4704c2c640..e62d6cbdf9bc 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1098,11 +1098,12 @@ out_hashtable:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct hlist_node *next;
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
list_del_rcu(&gtp->list);
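
The gtp_dellink() change switches to the _safe iterator because deleting the current entry invalidates its forward pointer. A self-contained C sketch of the same idea on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

/* Deleting the current entry while walking a list invalidates its
 * next pointer, so stash the successor first -- the same idea behind
 * switching gtp_dellink() to hlist_for_each_entry_safe(). */
static void delete_all(struct node **head)
{
	struct node *pos = *head, *next;

	while (pos) {
		next = pos->next;	/* save before freeing */
		free(pos);
		pos = next;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}
	delete_all(&head);
	printf("list emptied: %d\n", head == NULL);
	return 0;
}
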
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index a6fcbda64ecc..2b6ec979a62f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -154,8 +154,11 @@ static void free_netvsc_device(struct rcu_head *head)
int i;
kfree(nvdev->extension);
- vfree(nvdev->recv_buf);
- vfree(nvdev->send_buf);
+
+ if (!nvdev->recv_buf_gpadl_handle.decrypted)
+ vfree(nvdev->recv_buf);
+ if (!nvdev->send_buf_gpadl_handle.decrypted)
+ vfree(nvdev->send_buf);
bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0206b84284ab..ff016c11b4a0 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -999,10 +999,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
+ bool is_macsec_md_dst;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
md_dst = skb_metadata_dst(skb);
+ is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1013,14 +1015,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
- struct macsec_rx_sc *rx_sc = NULL;
+ const struct macsec_ops *ops;
- if (md_dst && md_dst->type == METADATA_MACSEC)
- rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);
+ ops = macsec_get_ops(macsec, NULL);
- if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
+ if (ops->rx_uses_md_dst && !is_macsec_md_dst)
continue;
+ if (is_macsec_md_dst) {
+ struct macsec_rx_sc *rx_sc;
+
+ /* All drivers that implement MACsec offload
+ * support using skb metadata destinations must
+ * indicate that they do so.
+ */
+ DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
+ rx_sc = find_rx_sc(&macsec->secy,
+ md_dst->u.macsec_info.sci);
+ if (!rx_sc)
+ continue;
+ /* device indicated macsec offload occurred */
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ eth_skb_pkt_type(skb, ndev);
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
+ }
+
+ /* This datapath is insecure because it is unable to
+ * enforce isolation of broadcast/multicast traffic and
+ * unicast traffic with promiscuous mode on the macsec
+ * netdev. Since the core stack has no mechanism to
+ * check that the hardware did indeed receive MACsec
+ * traffic, it is possible that the response handling
+ * done by the MACsec port was to a plaintext packet.
+ * This violates the MACsec protocol standard.
+ */
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1036,14 +1066,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
break;
nskb->dev = ndev;
- if (ether_addr_equal_64bits(hdr->h_dest,
- ndev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
- else
- nskb->pkt_type = PACKET_MULTICAST;
+ eth_skb_pkt_type(nskb, ndev);
__netif_rx(nskb);
- } else if (rx_sc || ndev->flags & IFF_PROMISC) {
+ } else if (ndev->flags & IFF_PROMISC) {
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
ret = RX_HANDLER_ANOTHER;
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index fa8c6fdcf301..d7aaefb5226b 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -695,7 +695,8 @@ static int dp83869_configure_mode(struct phy_device *phydev,
phy_ctrl_val = dp83869->mode;
if (phydev->interface == PHY_INTERFACE_MODE_MII) {
if (dp83869->mode == DP83869_100M_MEDIA_CONVERT ||
- dp83869->mode == DP83869_RGMII_100_BASE) {
+ dp83869->mode == DP83869_RGMII_100_BASE ||
+ dp83869->mode == DP83869_RGMII_COPPER_ETHERNET) {
phy_ctrl_val |= DP83869_OP_MODE_MII;
} else {
phydev_err(phydev, "selected op-mode is not valid with MII mode\n");
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 6b4bd9883304..c812f16eaa3a 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -12,6 +12,8 @@
#include <linux/hwmon.h>
#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
+#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2)
+#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3)
#define MDIO_MMD_AN_MV_STAT 32769
#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
@@ -129,6 +131,49 @@ static const struct mmd_val mv88q222x_revb0_init_seq1[] = {
{ MDIO_MMD_PCS, 0xfe05, 0x755c },
};
+static const struct mmd_val mv88q222x_revb1_init_seq0[] = {
+ { MDIO_MMD_PCS, 0xffe4, 0x0007 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PCS, 0xffe3, 0x7000 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0840 },
+};
+
+static const struct mmd_val mv88q222x_revb2_init_seq0[] = {
+ { MDIO_MMD_PCS, 0xffe4, 0x0007 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0840 },
+};
+
+static const struct mmd_val mv88q222x_revb1_revb2_init_seq1[] = {
+ { MDIO_MMD_PCS, 0xfe07, 0x125a },
+ { MDIO_MMD_PCS, 0xfe09, 0x1288 },
+ { MDIO_MMD_PCS, 0xfe08, 0x2588 },
+ { MDIO_MMD_PCS, 0xfe72, 0x042c },
+ { MDIO_MMD_PCS, 0xffe4, 0x0071 },
+ { MDIO_MMD_PCS, 0xffe4, 0x0001 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x0048 },
+ { MDIO_MMD_PMAPMD, 0x0000, 0x0000 },
+ { MDIO_MMD_PCS, 0x0000, 0x0000 },
+ { MDIO_MMD_PCS, 0xffdb, 0xfc10 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x58 },
+ { MDIO_MMD_PCS, 0xfcad, 0x030c },
+ { MDIO_MMD_PCS, 0x8032, 0x6001 },
+ { MDIO_MMD_PCS, 0xfdff, 0x05a5 },
+ { MDIO_MMD_PCS, 0xfdec, 0xdbaf },
+ { MDIO_MMD_PCS, 0xfcab, 0x1054 },
+ { MDIO_MMD_PCS, 0xfcac, 0x1483 },
+ { MDIO_MMD_PCS, 0x8033, 0xc801 },
+ { MDIO_MMD_AN, 0x8032, 0x2020 },
+ { MDIO_MMD_AN, 0x8031, 0xa28 },
+ { MDIO_MMD_AN, 0x8031, 0xc28 },
+ { MDIO_MMD_PCS, 0xfbba, 0x0cb2 },
+ { MDIO_MMD_PCS, 0xfbbb, 0x0c4a },
+ { MDIO_MMD_PCS, 0xfe5f, 0xe8 },
+ { MDIO_MMD_PCS, 0xfe05, 0x755c },
+ { MDIO_MMD_PCS, 0xfa20, 0x002a },
+ { MDIO_MMD_PCS, 0xfe11, 0x1105 },
+};
+
static int mv88q2xxx_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -687,31 +732,72 @@ static int mv88q222x_soft_reset(struct phy_device *phydev)
return 0;
}
-static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+static int mv88q222x_write_mmd_vals(struct phy_device *phydev,
+ const struct mmd_val *vals, size_t len)
{
- int ret, i;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq0); i++) {
- ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq0[i].devad,
- mv88q222x_revb0_init_seq0[i].regnum,
- mv88q222x_revb0_init_seq0[i].val);
+ for (; len; vals++, len--) {
+ ret = phy_write_mmd(phydev, vals->devad, vals->regnum,
+ vals->val);
if (ret < 0)
return ret;
}
+ return 0;
+}
+
+static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq0,
+ ARRAY_SIZE(mv88q222x_revb0_init_seq0));
+ if (ret < 0)
+ return ret;
+
usleep_range(5000, 10000);
- for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq1); i++) {
- ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq1[i].devad,
- mv88q222x_revb0_init_seq1[i].regnum,
- mv88q222x_revb0_init_seq1[i].val);
- if (ret < 0)
- return ret;
- }
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq1,
+ ARRAY_SIZE(mv88q222x_revb0_init_seq1));
+ if (ret < 0)
+ return ret;
+
+ return mv88q2xxx_config_init(phydev);
+}
+
+static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev)
+{
+ bool is_rev_b1 = phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB1;
+ int ret;
+
+ if (is_rev_b1)
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_init_seq0,
+ ARRAY_SIZE(mv88q222x_revb1_init_seq0));
+ else
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb2_init_seq0,
+ ARRAY_SIZE(mv88q222x_revb2_init_seq0));
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 5000);
+
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_revb2_init_seq1,
+ ARRAY_SIZE(mv88q222x_revb1_revb2_init_seq1));
+ if (ret < 0)
+ return ret;
return mv88q2xxx_config_init(phydev);
}
+static int mv88q222x_config_init(struct phy_device *phydev)
+{
+ if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0)
+ return mv88q222x_revb0_config_init(phydev);
+ else
+ return mv88q222x_revb1_revb2_config_init(phydev);
+}
+
static int mv88q222x_cable_test_start(struct phy_device *phydev)
{
int ret;
@@ -810,14 +896,15 @@ static struct phy_driver mv88q2xxx_driver[] = {
.get_sqi_max = mv88q2xxx_get_sqi_max,
},
{
- PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0),
+ .phy_id = MARVELL_PHY_ID_88Q2220,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2220",
.flags = PHY_POLL_CABLE_TEST,
.probe = mv88q2xxx_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.aneg_done = genphy_c45_aneg_done,
- .config_init = mv88q222x_revb0_config_init,
+ .config_init = mv88q222x_config_init,
.read_status = mv88q2xxx_read_status,
.soft_reset = mv88q222x_soft_reset,
.config_intr = mv88q2xxx_config_intr,
@@ -836,7 +923,7 @@ module_phy_driver(mv88q2xxx_driver);
static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
- { PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), },
+ { MARVELL_PHY_ID_88Q2220, MARVELL_PHY_ID_MASK },
{ /*sentinel*/ }
};
MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl);
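
mv88q222x_write_mmd_vals() factors the repeated per-sequence write loops into one table-driven helper. A standalone sketch of that table-driven register-init pattern, with write_reg() as a stubbed register writer:

#include <stddef.h>
#include <stdio.h>

struct reg_val { unsigned int reg; unsigned int val; };

static int write_reg(unsigned int reg, unsigned int val)
{
	printf("write 0x%04x = 0x%04x\n", reg, val);
	return 0;
}

/* One loop replaces N copies of the same write loop; each init
 * sequence becomes a const table passed in with its length. */
static int write_vals(const struct reg_val *vals, size_t len)
{
	int ret;

	for (; len; vals++, len--) {
		ret = write_reg(vals->reg, vals->val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

int main(void)
{
	static const struct reg_val init_seq[] = {
		{ 0xffe4, 0x0007 },
		{ 0xffe3, 0x7000 },
	};

	return write_vals(init_seq, sizeof(init_seq) / sizeof(init_seq[0]));
}
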
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
index 0f3a1538a8b8..f4f9412d0cd7 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek-ge-soc.c
@@ -216,6 +216,9 @@
#define MTK_PHY_LED_ON_LINK1000 BIT(0)
#define MTK_PHY_LED_ON_LINK100 BIT(1)
#define MTK_PHY_LED_ON_LINK10 BIT(2)
+#define MTK_PHY_LED_ON_LINK (MTK_PHY_LED_ON_LINK10 |\
+ MTK_PHY_LED_ON_LINK100 |\
+ MTK_PHY_LED_ON_LINK1000)
#define MTK_PHY_LED_ON_LINKDOWN BIT(3)
#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
@@ -231,6 +234,12 @@
#define MTK_PHY_LED_BLINK_100RX BIT(3)
#define MTK_PHY_LED_BLINK_10TX BIT(4)
#define MTK_PHY_LED_BLINK_10RX BIT(5)
+#define MTK_PHY_LED_BLINK_RX (MTK_PHY_LED_BLINK_10RX |\
+ MTK_PHY_LED_BLINK_100RX |\
+ MTK_PHY_LED_BLINK_1000RX)
+#define MTK_PHY_LED_BLINK_TX (MTK_PHY_LED_BLINK_10TX |\
+ MTK_PHY_LED_BLINK_100TX |\
+ MTK_PHY_LED_BLINK_1000TX)
#define MTK_PHY_LED_BLINK_COLLISION BIT(6)
#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
@@ -1247,11 +1256,9 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (blink < 0)
return -EIO;
- if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK10)) ||
- (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX |
- MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)))
+ if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
+ MTK_PHY_LED_ON_LINKDOWN)) ||
+ (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX)))
set_bit(bit_netdev, &priv->led_state);
else
clear_bit(bit_netdev, &priv->led_state);
@@ -1269,7 +1276,7 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (!rules)
return 0;
- if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10))
+ if (on & MTK_PHY_LED_ON_LINK)
*rules |= BIT(TRIGGER_NETDEV_LINK);
if (on & MTK_PHY_LED_ON_LINK10)
@@ -1287,10 +1294,10 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (on & MTK_PHY_LED_ON_HDX)
*rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
- if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX))
+ if (blink & MTK_PHY_LED_BLINK_RX)
*rules |= BIT(TRIGGER_NETDEV_RX);
- if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))
+ if (blink & MTK_PHY_LED_BLINK_TX)
*rules |= BIT(TRIGGER_NETDEV_TX);
return 0;
@@ -1323,15 +1330,19 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
on |= MTK_PHY_LED_ON_LINK1000;
if (rules & BIT(TRIGGER_NETDEV_RX)) {
- blink |= MTK_PHY_LED_BLINK_10RX |
- MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_1000RX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000RX : 0)) :
+ MTK_PHY_LED_BLINK_RX;
}
if (rules & BIT(TRIGGER_NETDEV_TX)) {
- blink |= MTK_PHY_LED_BLINK_10TX |
- MTK_PHY_LED_BLINK_100TX |
- MTK_PHY_LED_BLINK_1000TX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000TX : 0)) :
+ MTK_PHY_LED_BLINK_TX;
}
if (blink || on)
@@ -1344,9 +1355,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
MTK_PHY_LED0_ON_CTRL,
MTK_PHY_LED_ON_FDX |
MTK_PHY_LED_ON_HDX |
- MTK_PHY_LED_ON_LINK10 |
- MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK1000,
+ MTK_PHY_LED_ON_LINK,
on);
if (ret)
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 9f0495e8df4d..bfdd3875fe86 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -754,7 +754,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(NULL, uarg, false);
+ uarg->ops->complete(NULL, uarg, false);
}
dev_queue_xmit(skb);
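
This hunk is part of the tree-wide move from a bare ubuf_info callback pointer to a const ops table. A minimal userspace sketch of that callback-to-vtable migration; the field names mirror the patch, but the surrounding types are simplified for illustration:

#include <stdio.h>

struct ubuf;

struct ubuf_ops {
	void (*complete)(struct ubuf *u, int success);
};

struct ubuf {
	const struct ubuf_ops *ops;	/* was: a bare function pointer */
};

static void my_complete(struct ubuf *u, int success)
{
	printf("zerocopy complete, success=%d\n", success);
}

static const struct ubuf_ops my_ops = { .complete = my_complete };

int main(void)
{
	struct ubuf u = { .ops = &my_ops };

	/* Call sites change from u.callback(...) to u.ops->complete(...) */
	u.ops->complete(&u, 0);
	return 0;
}
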
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0b3f21cba552..9254bca2813d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1906,7 +1906,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(NULL, uarg, false);
+ uarg->ops->complete(NULL, uarg, false);
}
skb_reset_network_header(skb);
@@ -2125,14 +2125,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
tun_is_little_endian(tun), true,
vlan_hlen)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
- pr_err("unexpected GSO type: "
- "0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+
+ if (net_ratelimit()) {
+ netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+ }
WARN_ON_ONCE(1);
return -EINVAL;
}
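
The tun change above wraps the error dump in net_ratelimit() so a misbehaving peer cannot flood the log. A rough userspace stand-in for the guard-before-log pattern; the real kernel helper is more elaborate (burst handling and a suppression count):

#include <stdio.h>
#include <time.h>

/* Allow at most one message per second. */
static int ratelimit(void)
{
	static time_t last;
	time_t now = time(NULL);

	if (now == last)
		return 0;
	last = now;
	return 1;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		if (ratelimit())
			printf("unexpected GSO type (logged at most once per second)\n");
	return 0;
}
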
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index a9c418890a1c..df9d767cb524 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1317,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
netif_set_tso_max_size(dev->net, 16384);
+ ax88179_reset(dev);
+
return 0;
}
@@ -1454,21 +1456,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
- skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!ax_skb)
return 0;
- skb_trim(ax_skb, pkt_len);
-
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
+ skb_put(ax_skb, pkt_len);
+ memcpy(ax_skb->data, skb->data + 2, pkt_len);
- skb->truesize = pkt_len_plus_padd +
- SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
@@ -1695,7 +1692,6 @@ static const struct driver_info ax88179_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1708,7 +1704,6 @@ static const struct driver_info ax88178a_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
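
ax88179_rx_fixup() now allocates a right-sized skb and copies the packet out instead of cloning the large URB buffer and trimming it, which kept the big allocation pinned and skewed truesize accounting. A simplified sketch of the copy-out approach, minus all the skb machinery:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Allocate exactly pkt_len bytes and copy the packet out of the big
 * receive buffer, so the large buffer can be reused or freed. */
static unsigned char *extract_packet(const unsigned char *rxbuf,
				     size_t pkt_off, size_t pkt_len)
{
	unsigned char *pkt = malloc(pkt_len);

	if (!pkt)
		return NULL;
	memcpy(pkt, rxbuf + pkt_off, pkt_len);
	return pkt;
}

int main(void)
{
	unsigned char rxbuf[2048] = { 0 };
	unsigned char *pkt;

	memcpy(rxbuf + 2, "hello", 5);	/* skip 2-byte alignment header */
	pkt = extract_packet(rxbuf, 2, 5);
	if (pkt) {
		printf("%.5s\n", pkt);
		free(pkt);
	}
	return 0;
}
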
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e2e181378f41..a5469cf5cf67 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1368,6 +1368,9 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
@@ -1431,6 +1434,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
{QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
+ {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c22d1118a133..115c3c5414f2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3807,6 +3807,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
+ bool update = false;
int i;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
@@ -3814,13 +3815,28 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
if (rxfh->indir) {
+ if (!vi->has_rss)
+ return -EOPNOTSUPP;
+
for (i = 0; i < vi->rss_indir_table_size; ++i)
vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+ update = true;
}
- if (rxfh->key)
+
+ if (rxfh->key) {
+ /* If either _F_HASH_REPORT or _F_RSS is negotiated, the
+ * device provides hash calculation capabilities; that is,
+ * hash_key is configured.
+ */
+ if (!vi->has_rss && !vi->has_rss_hash_report)
+ return -EOPNOTSUPP;
+
memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+ update = true;
+ }
- virtnet_commit_rss_command(vi);
+ if (update)
+ virtnet_commit_rss_command(vi);
return 0;
}
@@ -4729,13 +4745,15 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
vi->has_rss = true;
- if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_indir_table_size =
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
+ }
+
+ if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 3495591a5c29..3a9148fb1422 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1615,6 +1615,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+ return false;
+
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
@@ -1670,6 +1674,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
bool raw_proto = false;
void *oiph;
__be32 vni = 0;
+ int nh;
/* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1758,12 +1763,28 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(vxlan->dev, rx_length_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
- ++vxlan->dev->stats.rx_frame_errors;
- ++vxlan->dev->stats.rx_errors;
+ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
@@ -1833,7 +1854,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
goto out;
}
parp = arp_hdr(skb);
@@ -1889,7 +1912,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2048,7 +2071,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2259,7 +2282,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
len);
} else {
drop:
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2291,7 +2314,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
addr_family, dst_port,
vxlan->cfg.flags);
if (!dst_vxlan) {
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
@@ -2555,7 +2578,7 @@ out_unlock:
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
@@ -2563,11 +2586,11 @@ drop:
tx_error:
rcu_read_unlock();
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
dst_release(ndst);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2600,7 +2623,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2638,7 +2661,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2735,7 +2758,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
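
The vxlan_rcv() hunks save the outer header as an offset from skb->head because pskb_inet_may_pull() may reallocate the head and leave a raw pointer dangling. The same reasoning in standalone C, using realloc() as the reallocation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	size_t off;
	char *hdr;

	if (!buf)
		return 1;
	strcpy(buf, "xx" "outerhdr");
	hdr = buf + 2;		/* raw pointer into the buffer */
	off = hdr - buf;	/* an offset survives reallocation */

	buf = realloc(buf, 4096);	/* the block may move */
	if (!buf)
		return 1;
	hdr = buf + off;	/* recompute; never reuse the old pointer */
	printf("%s\n", hdr);
	free(buf);
	return 0;
}
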
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index a6a37d67a50a..9f4bf41a3d41 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -9020,6 +9020,7 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
offload = &arvif->arp_ns_offload;
count = 0;
+ /* Note: read_lock_bh() calls rcu_read_lock() */
read_lock_bh(&idev->lock);
memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
@@ -9050,7 +9051,8 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
}
/* get anycast address */
- for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
+ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+ ifaca6 = rcu_dereference(ifaca6->aca_next)) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 072b0a5827d1..eca1457caa0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 90
+#define IWL_BZ_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 80
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 9b79279fd76c..dbbcb2d0968c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 90
+#define IWL_SC_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 82
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 4863a3c74640..d84d7e955bb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -53,6 +53,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!pasn)
return -ENOBUFS;
+ iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
+
pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
switch (pasn->cipher) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index 9f69e04594e4..fe5bba8561d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -279,6 +279,7 @@ int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
NULL);
+ iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
return 0;
}
@@ -296,7 +297,6 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
cmd.link_id = cpu_to_le32(link_info->fw_link_id);
- iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
cmd.spec_link_id = link_conf->link_id;
cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index f3e3986b4c72..11559563ae38 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2813,7 +2813,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (ver_handler->version != scan_ver)
continue;
- return ver_handler->handler(mvm, vif, params, type, uid);
+ err = ver_handler->handler(mvm, vif, params, type, uid);
+ return err ? : uid;
}
err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index b55fe320633c..59e1fc0018df 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -3899,7 +3899,7 @@ static int hwsim_pmsr_report_nl(struct sk_buff *msg, struct genl_info *info)
}
nla_for_each_nested(peer, peers, rem) {
- struct cfg80211_pmsr_result result;
+ struct cfg80211_pmsr_result result = {};
err = mac80211_hwsim_parse_pmsr_result(peer, &result, info);
if (err)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 1fcbd83f7ff2..17421da139f2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -390,9 +390,8 @@ bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
- bool zerocopy_success);
+/* Callbacks from stack when TX packet can be released */
+extern const struct ubuf_info_ops xenvif_ubuf_ops;
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7cff90aa8d24..65db5f14465f 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -593,7 +593,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
for (i = 0; i < MAX_PENDING_REQS; i++) {
queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
- { { .callback = xenvif_zerocopy_callback },
+ { { .ops = &xenvif_ubuf_ops },
{ { .ctx = NULL,
.desc = i } } };
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index ef76850d9bcd..bab7e43ea05b 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1156,7 +1156,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
- uarg->callback(NULL, uarg, true);
+ uarg->ops->complete(NULL, uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
/* Fill the skb with the new (local) frags. */
@@ -1278,8 +1278,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
return work_done;
}
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
- bool zerocopy_success)
+static void xenvif_zerocopy_callback(struct sk_buff *skb,
+ struct ubuf_info *ubuf_base,
+ bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
@@ -1312,6 +1313,10 @@ void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
xenvif_skb_zerocopy_complete(queue);
}
+const struct ubuf_info_ops xenvif_ubuf_ops = {
+ .complete = xenvif_zerocopy_callback,
+};
+
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
struct gnttab_unmap_grant_ref *gop;
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 7eb17f46a815..9e1a34e23af2 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -424,7 +424,8 @@ struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
- struct regulator *regulator;
+ struct regulator *vin_regulator;
+ struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);
- ret = regulator_disable(trf->regulator);
+ ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
- trf->regulator = devm_regulator_get(&spi->dev, "vin");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
+ if (IS_ERR(trf->vin_regulator)) {
+ ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}
- uvolts = regulator_get_voltage(trf->regulator);
+ uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
- trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
+ if (IS_ERR(trf->vddio_regulator)) {
+ ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- if (regulator_get_voltage(trf->regulator) == 1800000) {
+ if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
- goto err_disable_regulator;
+ goto err_disable_vddio_regulator;
}
nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2137,8 +2138,10 @@ err_shutdown:
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
-err_disable_regulator:
- regulator_disable(trf->regulator);
+err_disable_vddio_regulator:
+ regulator_disable(trf->vddio_regulator);
+err_disable_vin_regulator:
+ regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
- regulator_disable(trf->regulator);
+ regulator_disable(trf->vddio_regulator);
+ regulator_disable(trf->vin_regulator);
mutex_destroy(&trf->lock);
}
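
The trf7970a changes add a second unwind label so each regulator is released on exactly the error paths that follow its enable, in reverse acquisition order. A compact sketch of that goto-unwind idiom with stubbed acquire/release steps:

#include <stdio.h>

static int acquire(const char *name) { printf("enable %s\n", name); return 0; }
static void release(const char *name) { printf("disable %s\n", name); }

/* Each failure jumps to the label that unwinds everything acquired
 * so far; the labels fall through in reverse order. */
static int probe(int fail_step)
{
	int ret;

	ret = acquire("vin");
	if (ret)
		return ret;
	ret = fail_step == 1 ? -1 : acquire("vdd-io");
	if (ret)
		goto err_disable_vin;
	ret = fail_step == 2 ? -1 : acquire("nfc-dev");
	if (ret)
		goto err_disable_vddio;
	return 0;

err_disable_vddio:
	release("vdd-io");
err_disable_vin:
	release("vin");
	return ret;
}

int main(void)
{
	probe(2);	/* fails after both regulators are on */
	return 0;
}
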
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 27281a9a8951..bf7615cb36ee 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -629,27 +629,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
/*
- * Returns true for sink states that can't ever transition back to live.
- */
-static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
-{
- switch (nvme_ctrl_state(ctrl)) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_LIVE:
- case NVME_CTRL_RESETTING:
- case NVME_CTRL_CONNECTING:
- return false;
- case NVME_CTRL_DELETING:
- case NVME_CTRL_DELETING_NOIO:
- case NVME_CTRL_DEAD:
- return true;
- default:
- WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
- return true;
- }
-}
-
-/*
* Waits for the controller state to be resetting, or returns false if it is
* not possible to ever transition to that state.
*/
@@ -2153,7 +2132,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
- ret = blk_revalidate_disk_zones(ns->disk, NULL);
+ ret = blk_revalidate_disk_zones(ns->disk);
if (ret && !nvme_first_scan(ns->disk))
goto out;
}
@@ -3681,7 +3660,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
"Found shared namespace %d, but multipathing not supported.\n",
info->nsid);
dev_warn_once(ctrl->device,
- "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+ "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
}
}
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 3dfd5ae99ae0..499a8bb7cac7 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -423,13 +423,20 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
- * For iopoll, complete it directly.
+ * For iopoll, complete it directly. Note that using the uring_cmd
+ * helper for this is safe only because we check blk_rq_is_poll().
+ * Since that returns false if we're not on a polled queue, it's
+ * safe to use the polled completion helper.
+ *
* Otherwise, move the completion to task work.
*/
- if (blk_rq_is_poll(req))
- nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
- else
+ if (blk_rq_is_poll(req)) {
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+ io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
+ } else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+ }
return RQ_END_IO_FREE;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5397fb428b24..d16e976ae1a4 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -247,7 +247,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
if (nvme_path_is_disabled(ns))
continue;
- if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+ if (ns->ctrl->numa_node != NUMA_NO_NODE &&
+ READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
distance = node_distance(node, ns->ctrl->numa_node);
else
distance = LOCAL_DISTANCE;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d0ed64dc7380..05532c281177 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -162,6 +162,11 @@ enum nvme_quirks {
* Disables simple suspend/resume path.
*/
NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),
+
+ /*
+ * MSI (but not MSI-X) interrupts are broken and never fire.
+ */
+ NVME_QUIRK_BROKEN_MSI = (1 << 21),
};
/*
@@ -741,6 +746,27 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+ switch (nvme_ctrl_state(ctrl)) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ return false;
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
+ case NVME_CTRL_DEAD:
+ return true;
+ default:
+ WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+ return true;
+ }
+}
+
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8e0bb9692685..710043086dff 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1286,6 +1286,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
u32 csts = readl(dev->bar + NVME_REG_CSTS);
u8 opcode;
+ if (nvme_state_terminal(&dev->ctrl))
+ goto disable;
+
/* If PCI error recovery process is happening, we cannot reset or
* the recovery mechanism will surely fail.
*/
@@ -1390,8 +1393,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
return BLK_EH_RESET_TIMER;
disable:
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
+ if (nvme_state_terminal(&dev->ctrl))
+ nvme_dev_disable(dev, true);
return BLK_EH_DONE;
+ }
nvme_dev_disable(dev, false);
if (nvme_try_sched_reset(&dev->ctrl))
@@ -2218,6 +2224,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
.priv = dev,
};
unsigned int irq_queues, poll_queues;
+ unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
/*
* Poll queues don't need interrupts, but we need at least one I/O queue
@@ -2241,8 +2248,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
irq_queues = 1;
if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
irq_queues += (nr_io_queues - poll_queues);
- return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
- PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+ flags &= ~PCI_IRQ_MSI;
+ return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
+ &affd);
}
static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
@@ -2471,6 +2480,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ unsigned int flags = PCI_IRQ_ALL_TYPES;
if (pci_enable_device_mem(pdev))
return result;
@@ -2487,7 +2497,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
* interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
* adjust this later.
*/
- result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+ flags &= ~PCI_IRQ_MSI;
+ result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
if (result < 0)
goto disable;
@@ -3384,6 +3396,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_DISABLE_WRITE_ZEROES|
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */
+ .driver_data = NVME_QUIRK_BROKEN_MSI },
{ PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
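
NVME_QUIRK_BROKEN_MSI works by starting from PCI_IRQ_ALL_TYPES and masking out MSI, letting vector allocation fall back to MSI-X or legacy interrupts. A toy sketch of the flag masking; the bit values below are invented for illustration:

#include <stdio.h>

#define IRQ_MSI		(1u << 0)	/* illustrative bit values */
#define IRQ_MSIX	(1u << 1)
#define IRQ_LEGACY	(1u << 2)
#define IRQ_ALL_TYPES	(IRQ_MSI | IRQ_MSIX | IRQ_LEGACY)
#define QUIRK_BROKEN_MSI (1u << 21)

int main(void)
{
	unsigned int quirks = QUIRK_BROKEN_MSI;
	unsigned int flags = IRQ_ALL_TYPES;

	/* Start from "anything goes" and mask out the broken mode. */
	if (quirks & QUIRK_BROKEN_MSI)
		flags &= ~IRQ_MSI;
	printf("flags=0x%x (MSI %s)\n", flags,
	       flags & IRQ_MSI ? "allowed" : "masked");
	return 0;
}
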
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index fdbcdcedcee9..28bc2f373cfa 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -360,12 +360,18 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
} while (ret > 0);
}
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
!llist_empty(&queue->req_list);
}
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !nvme_tcp_tls(&queue->ctrl->ctrl) &&
+ nvme_tcp_queue_has_pending(queue);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -386,7 +392,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
mutex_unlock(&queue->send_mutex);
}
- if (last && nvme_tcp_queue_more(queue))
+ if (last && nvme_tcp_queue_has_pending(queue))
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 3ddbc3880cac..4f08362aee8b 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -285,9 +285,9 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
}
if (shash_len != crypto_shash_digestsize(shash_tfm)) {
- pr_debug("%s: hash len mismatch (len %d digest %d)\n",
- __func__, shash_len,
- crypto_shash_digestsize(shash_tfm));
+ pr_err("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
ret = -EINVAL;
goto out_free_tfm;
}
@@ -370,7 +370,7 @@ out_free_response:
nvme_auth_free_key(transformed_key);
out_free_tfm:
crypto_free_shash(shash_tfm);
- return 0;
+ return ret;
}
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
@@ -480,7 +480,7 @@ out_free_response:
nvme_auth_free_key(transformed_key);
out_free_tfm:
crypto_free_shash(shash_tfm);
- return 0;
+ return ret;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index a2325330bf22..7fda69395c1e 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -754,6 +754,18 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
NULL,
};
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
+{
+ struct config_item *ns_item;
+ char name[12];
+
+ snprintf(name, sizeof(name), "%u", nsid);
+ mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
+ ns_item = config_group_find_item(&subsys->namespaces_group, name);
+ mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
+ return ns_item != NULL;
+}
+
static void nvmet_ns_release(struct config_item *item)
{
struct nvmet_ns *ns = to_nvmet_ns(item);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8860a3eb71ec..2fde22323622 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -437,10 +437,13 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
u16 nvmet_req_find_ns(struct nvmet_req *req)
{
u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
- req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+ req->ns = xa_load(&subsys->namespaces, nsid);
if (unlikely(!req->ns)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
+ if (nvmet_subsys_nsid_exists(subsys, nsid))
+ return NVME_SC_INTERNAL_PATH_ERROR;
return NVME_SC_INVALID_NS | NVME_SC_DNR;
}
@@ -1683,7 +1686,8 @@ static int __init nvmet_init(void)
if (!buffered_io_wq)
goto out_free_zbd_work_queue;
- nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ nvmet_wq = alloc_workqueue("nvmet-wq",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!nvmet_wq)
goto out_free_buffered_work_queue;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index f460728e1df1..c1306de1f4dd 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -543,6 +543,7 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page);
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
#define NVMET_MIN_QUEUE_SIZE 16
#define NVMET_MAX_QUEUE_SIZE 1024
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 5b8c63e74639..6e1b4140cde0 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -474,12 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
return 0;
out_free:
- while (--i >= 0) {
- struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
- list_del(&rsp->free_list);
- nvmet_rdma_free_rsp(ndev, rsp);
- }
+ while (--i >= 0)
+ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps);
out:
return ret;
@@ -490,12 +486,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
struct nvmet_rdma_device *ndev = queue->dev;
int i, nr_rsps = queue->recv_queue_size * 2;
- for (i = 0; i < nr_rsps; i++) {
- struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
- list_del(&rsp->free_list);
- nvmet_rdma_free_rsp(ndev, rsp);
- }
+ for (i = 0; i < nr_rsps; i++)
+ nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
kfree(queue->rsps);
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index a5422e2c979a..380f22ee3ebb 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -348,6 +348,7 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
return 0;
}
+/* If cmd buffers are NULL, no operation is performed */
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
kfree(cmd->iov);
@@ -1581,13 +1582,9 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
struct nvmet_tcp_cmd *cmd = queue->cmds;
int i;
- for (i = 0; i < queue->nr_cmds; i++, cmd++) {
- if (nvmet_tcp_need_data_in(cmd))
- nvmet_tcp_free_cmd_buffers(cmd);
- }
-
- if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
- nvmet_tcp_free_cmd_buffers(&queue->connect);
+ for (i = 0; i < queue->nr_cmds; i++, cmd++)
+ nvmet_tcp_free_cmd_buffers(cmd);
+ nvmet_tcp_free_cmd_buffers(&queue->connect);
}
static void nvmet_tcp_release_queue_work(struct work_struct *w)
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 3148d9f1bde6..0021d06041c1 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -52,14 +52,10 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
return false;
/*
- * ZNS does not define a conventional zone type. If the underlying
- * device has a bitmap set indicating the existence of conventional
- * zones, reject the device. Otherwise, use report zones to detect if
- * the device has conventional zones.
+ * ZNS does not define a conventional zone type. Use report zones
+ * to detect if the device has conventional zones and reject it if
+ * it does.
*/
- if (ns->bdev->bd_disk->conv_zones_bitmap)
- return false;
-
ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
validate_conv_zones_cb, NULL);
if (ret < 0)
diff --git a/drivers/of/property.c b/drivers/of/property.c
index a6358ee99b74..769ae2cf5f3c 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1252,6 +1252,7 @@ DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
DEFINE_SIMPLE_PROP(panel, "panel", NULL)
DEFINE_SIMPLE_PROP(msi_parent, "msi-parent", "#msi-cells")
DEFINE_SIMPLE_PROP(post_init_providers, "post-init-providers", NULL)
+DEFINE_SIMPLE_PROP(access_controllers, "access-controllers", "#access-controller-cells")
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
@@ -1359,6 +1360,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_msi_parent, },
{ .parse_prop = parse_gpio_compat, },
{ .parse_prop = parse_interrupts, },
+ { .parse_prop = parse_access_controllers, },
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index f9f0b22bccbb..282eb5966fd0 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -1494,20 +1494,26 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
return 0;
}
-/*
- * Callback function provided to the Energy Model framework upon registration.
+/**
+ * dev_pm_opp_calc_power() - Calculate power value for device with EM
+ * @dev: Device for which an Energy Model has to be registered
+ * @uW: New power value that is calculated
+ * @kHz: Frequency for which the new power is calculated
+ *
* This computes the power estimated by @dev at @kHz if it is the frequency
* of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
* (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
* frequency and @uW to the associated power. The power is estimated as
* P = C * V^2 * f with C being the device's capacitance and V and f
* respectively the voltage and frequency of the OPP.
+ * It is also used as a callback function provided to the Energy Model
+ * framework upon registration.
*
 * Return: -EINVAL if the power calculation failed because of missing
 * parameters, 0 otherwise.
*/
-static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
- unsigned long *kHz)
+int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
+ unsigned long *kHz)
{
struct dev_pm_opp *opp;
struct device_node *np;
@@ -1544,6 +1550,7 @@ static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
return 0;
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power);
static bool _of_has_opp_microwatt_property(struct device *dev)
{
@@ -1619,7 +1626,7 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
goto failed;
}
- EM_SET_ACTIVE_POWER_CB(em_cb, _get_power);
+ EM_SET_ACTIVE_POWER_CB(em_cb, dev_pm_opp_calc_power);
register_em:
ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
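The kernel-doc above spells out the dynamic-power model P = C * V^2 * f. A
minimal standalone sketch of that arithmetic (plain C with made-up numbers;
the kernel implementation works in OPP units rather than SI floats):

    #include <stdio.h>

    /* Illustrative only: dynamic power P = C * V^2 * f, reported in uW. */
    static double est_power_uw(double cap_farads, double volts, double freq_hz)
    {
    	return cap_farads * volts * volts * freq_hz * 1e6;
    }

    int main(void)
    {
    	/* Hypothetical OPP: 1.2 GHz at 0.9 V with an effective C of 1 nF */
    	printf("%.0f uW\n", est_power_uw(1e-9, 0.9, 1.2e9));
    	return 0;
    }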
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 2428d278e015..47761c7ef267 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -177,8 +177,8 @@ void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
/* Restore L0s/L1 if they were enabled */
if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, clnkctl);
- pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, plnkctl);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
}
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bf4833221816..eff7f5df08e2 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3766,14 +3766,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
/*
- * Apparently the LSI / Agere FW643 can't recover after a Secondary Bus
- * Reset and requires a power-off or suspend/resume and rescan. Prevent
- * use of that reset.
- */
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5900, quirk_no_bus_reset);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5901, quirk_no_bus_reset);
-
-/*
* Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
* automatically disables LTSSM when Secondary Bus Reset is received and
* the device stops working. Prevent bus reset for these devices. With
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index a9277dcf90ce..89dd38343f93 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -709,6 +709,7 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
drw_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.event_init = ali_drw_pmu_event_init,
.add = ali_drw_pmu_add,
@@ -746,18 +747,14 @@ static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
struct ali_drw_pmu_irq *irq;
struct ali_drw_pmu *drw_pmu;
unsigned int target;
- int ret;
- cpumask_t node_online_cpus;
irq = hlist_entry_safe(node, struct ali_drw_pmu_irq, node);
if (cpu != irq->cpu)
return 0;
- ret = cpumask_and(&node_online_cpus,
- cpumask_of_node(cpu_to_node(cpu)), cpu_online_mask);
- if (ret)
- target = cpumask_any_but(&node_online_cpus, cpu);
- else
+ target = cpumask_any_and_but(cpumask_of_node(cpu_to_node(cpu)),
+ cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
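This hunk, like several below, drops an on-stack cpumask_t in favour of the
cpumask_any_and_but() helper. A kernel-context sketch of the recurring
hotplug-offline pattern, preferring an online CPU on the dying CPU's NUMA
node and falling back to any other online CPU:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /*
     * Pick where to migrate a PMU context: first try another online CPU
     * on the same node, then any other online CPU. No temporary cpumask
     * is needed on the stack.
     */
    static unsigned int pick_migration_target(int node, unsigned int dying_cpu)
    {
    	unsigned int target;

    	target = cpumask_any_and_but(cpumask_of_node(node),
    				     cpu_online_mask, dying_cpu);
    	if (target >= nr_cpu_ids)
    		target = cpumask_any_but(cpu_online_mask, dying_cpu);

    	return target;
    }

A return value of nr_cpu_ids or more means no eligible CPU remains.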
diff --git a/drivers/perf/amlogic/meson_ddr_pmu_core.c b/drivers/perf/amlogic/meson_ddr_pmu_core.c
index bbc7285fd934..07446d784a1a 100644
--- a/drivers/perf/amlogic/meson_ddr_pmu_core.c
+++ b/drivers/perf/amlogic/meson_ddr_pmu_core.c
@@ -492,6 +492,7 @@ int meson_ddr_pmu_create(struct platform_device *pdev)
*pmu = (struct ddr_pmu) {
.pmu = {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 6be03f81ae5d..a7fd80677919 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1409,6 +1409,7 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
cci_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.name = cci_pmu->model->name,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = cci_pmu_enable,
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 641471bd5eff..f4495ff6525f 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1265,6 +1265,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Perf driver registration */
ccn->dt.pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = ccn->dev,
.attr_groups = arm_ccn_pmu_attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = arm_ccn_pmu_event_init,
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 7ef9c7e4836b..e26ad1d3ed0b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1950,20 +1950,20 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_no
struct arm_cmn *cmn;
unsigned int target;
int node;
- cpumask_t mask;
cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
if (cpu != cmn->cpu)
return 0;
node = dev_to_node(cmn->dev);
- if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
- cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
- target = cpumask_any(&mask);
- else
+
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
+
if (target < nr_cpu_ids)
arm_cmn_migrate(cmn, target);
+
return 0;
}
@@ -2482,6 +2482,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
cmn->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = cmn->dev,
.attr_groups = arm_cmn_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index b9a252272f1e..ba0cf2f466ef 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -1206,6 +1206,7 @@ static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
cspmu->pmu = (struct pmu){
.task_ctx_nr = perf_invalid_context,
.module = cspmu->impl.module,
+ .parent = cspmu->dev,
.pmu_enable = arm_cspmu_enable,
.pmu_disable = arm_cspmu_disable,
.event_init = arm_cspmu_event_init,
@@ -1322,8 +1323,7 @@ static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
- int dst;
- struct cpumask online_supported;
+ unsigned int dst;
struct arm_cspmu *cspmu =
hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);
@@ -1333,9 +1333,8 @@ static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
return 0;
/* Choose a new CPU to migrate ownership of the PMU to */
- cpumask_and(&online_supported, &cspmu->associated_cpus,
- cpu_online_mask);
- dst = cpumask_any_but(&online_supported, cpu);
+ dst = cpumask_any_and_but(&cspmu->associated_cpus,
+ cpu_online_mask, cpu);
if (dst >= nr_cpu_ids)
return 0;
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 8a81be2dd5ec..2ec96e204c40 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -673,6 +673,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev)
dmc620_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.event_init = dmc620_pmu_event_init,
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index bae3ca37f846..92248a24a1aa 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -230,15 +230,6 @@ static const struct attribute_group *dsu_pmu_attr_groups[] = {
NULL,
};
-static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
-{
- struct cpumask online_supported;
-
- cpumask_and(&online_supported,
- &dsu_pmu->associated_cpus, cpu_online_mask);
- return cpumask_any_but(&online_supported, cpu);
-}
-
static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
{
return (idx < dsu_pmu->num_counters) ||
@@ -751,6 +742,7 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
dsu_pmu->pmu = (struct pmu) {
.task_ctx_nr = perf_invalid_context,
+ .parent = &pdev->dev,
.module = THIS_MODULE,
.pmu_enable = dsu_pmu_enable,
.pmu_disable = dsu_pmu_disable,
@@ -827,14 +819,16 @@ static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
- int dst;
- struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
- cpuhp_node);
+ struct dsu_pmu *dsu_pmu;
+ unsigned int dst;
+
+ dsu_pmu = hlist_entry_safe(node, struct dsu_pmu, cpuhp_node);
if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
return 0;
- dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
+ dst = cpumask_any_and_but(&dsu_pmu->associated_cpus,
+ cpu_online_mask, cpu);
/* If there are no active CPUs in the DSU, leave IRQ disabled */
if (dst >= nr_cpu_ids)
return 0;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 3596db36cbff..4b1a9a92ea11 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -196,6 +196,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (!pmu)
return -ENOMEM;
+ pmu->pmu.parent = &pdev->dev;
pmu->plat_device = pdev;
ret = pmu_parse_irqs(pmu);
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 719aa953a1c4..d5fa92ba8373 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -860,6 +860,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
smmu_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = smmu_pmu_enable,
.pmu_disable = smmu_pmu_disable,
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 35f0de03416f..9100d82bfabc 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -932,6 +932,7 @@ static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
spe_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &spe_pmu->pdev->dev,
.capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
.attr_groups = arm_spe_pmu_attr_groups,
/*
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 957058ad0099..c5e328f23841 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -690,9 +690,8 @@ static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_n
{
struct dwc_pcie_pmu *pcie_pmu;
struct pci_dev *pdev;
- int node;
- cpumask_t mask;
unsigned int target;
+ int node;
pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
/* Nothing to do if this CPU doesn't own the PMU */
@@ -702,10 +701,9 @@ static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_n
pcie_pmu->on_cpu = -1;
pdev = pcie_pmu->pdev;
node = dev_to_node(&pdev->dev);
- if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
- cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
- target = cpumask_any(&mask);
- else
+
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 4e8fa5a48fcf..1bbdb29743c4 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -651,6 +651,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
*pmu = (struct ddr_pmu) {
.pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = dev,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
index 5d1f0e9fdb08..03c506aa3853 100644
--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -350,15 +350,27 @@ static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
return false;
for (num = 0; num < counters; num++) {
+ /*
+ * If we find a related event, then it's a valid group
+ * since we don't need to allocate a new counter for it.
+ */
if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
break;
}
+ /*
+ * Otherwise it's a new event but if there's no available counter,
+ * fail the check since we cannot schedule all the events in
+ * the group simultaneously.
+ */
+ if (num == HISI_PCIE_MAX_COUNTERS)
+ return false;
+
if (num == counters)
event_group[counters++] = sibling;
}
- return counters <= HISI_PCIE_MAX_COUNTERS;
+ return true;
}
static int hisi_pcie_pmu_event_init(struct perf_event *event)
@@ -673,7 +685,6 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
unsigned int target;
- cpumask_t mask;
int numa_node;
/* Nothing to do if this CPU doesn't own the PMU */
@@ -684,10 +695,10 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
/* Choose a local CPU from all online cpus. */
numa_node = dev_to_node(&pcie_pmu->pdev->dev);
- if (cpumask_and(&mask, cpumask_of_node(numa_node), cpu_online_mask) &&
- cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
- target = cpumask_any(&mask);
- else
+
+ target = cpumask_any_and_but(cpumask_of_node(numa_node),
+ cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
@@ -807,6 +818,7 @@ static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_
pcie_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.event_init = hisi_pcie_pmu_event_init,
.pmu_enable = hisi_pcie_pmu_enable,
.pmu_disable = hisi_pcie_pmu_disable,
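The reworked group validation above fails as soon as a new (unrelated) event
would need more counters than the hardware provides, instead of counting
first and checking afterwards. A standalone sketch of the same counting
logic, with events reduced to plain integers and a made-up counter limit:

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_COUNTERS 4	/* stand-in for HISI_PCIE_MAX_COUNTERS */

    /* Count distinct events; fail as soon as one cannot get a counter. */
    static bool validate_group(const int *events, size_t nr)
    {
    	int seen[MAX_COUNTERS];
    	size_t counters = 0, i, num;

    	for (i = 0; i < nr; i++) {
    		for (num = 0; num < counters; num++)
    			if (seen[num] == events[i])
    				break;	/* related event, reuses a counter */
    		if (num == MAX_COUNTERS)
    			return false;	/* new event, no counter left */
    		if (num == counters)
    			seen[counters++] = events[i];
    	}
    	return true;
    }

    int main(void)
    {
    	const int ok[]  = { 1, 1, 2, 3, 4 };	/* 4 distinct -> fits  */
    	const int bad[] = { 1, 2, 3, 4, 5 };	/* 5 distinct -> fails */

    	return !(validate_group(ok, 5) && !validate_group(bad, 5));
    }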
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 04031450d5fe..a60e4c966098 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -504,7 +504,6 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
node);
- cpumask_t pmu_online_cpus;
unsigned int target;
if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
@@ -518,9 +517,8 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
hisi_pmu->on_cpu = -1;
/* Choose a new CPU to migrate ownership of the PMU to */
- cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus,
- cpu_online_mask);
- target = cpumask_any_but(&pmu_online_cpus, cpu);
+ target = cpumask_any_and_but(&hisi_pmu->associated_cpus,
+ cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
@@ -538,6 +536,7 @@ void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
struct pmu *pmu = &hisi_pmu->pmu;
pmu->module = module;
+ pmu->parent = hisi_pmu->dev;
pmu->task_ctx_nr = perf_invalid_context;
pmu->event_init = hisi_uncore_pmu_event_init;
pmu->pmu_enable = hisi_uncore_pmu_enable;
diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
index 16869bf5bf4c..e900f8e00b18 100644
--- a/drivers/perf/hisilicon/hns3_pmu.c
+++ b/drivers/perf/hisilicon/hns3_pmu.c
@@ -1085,15 +1085,27 @@ static bool hns3_pmu_validate_event_group(struct perf_event *event)
return false;
for (num = 0; num < counters; num++) {
+ /*
+ * If we find a related event, then it's a valid group
+ * since we don't need to allocate a new counter for it.
+ */
if (hns3_pmu_cmp_event(event_group[num], sibling))
break;
}
+ /*
+ * Otherwise it's a new event but if there's no available counter,
+ * fail the check since we cannot schedule all the events in
+ * the group simultaneously.
+ */
+ if (num == HNS3_PMU_MAX_HW_EVENTS)
+ return false;
+
if (num == counters)
event_group[counters++] = sibling;
}
- return counters <= HNS3_PMU_MAX_HW_EVENTS;
+ return true;
}
static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
@@ -1419,6 +1431,7 @@ static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
hns3_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.event_init = hns3_pmu_event_init,
.pmu_enable = hns3_pmu_enable,
.pmu_disable = hns3_pmu_disable,
@@ -1515,7 +1528,7 @@ static int hns3_pmu_irq_register(struct pci_dev *pdev,
return ret;
}
- ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
+ ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev);
if (ret) {
pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
return ret;
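Switching to devm_add_action_or_reset() matters on the failure path: unlike
devm_add_action(), it invokes the release callback immediately when the
registration itself fails, so the IRQ set up just above cannot leak. A
minimal sketch of the idiom with a hypothetical cleanup callback:

    #include <linux/device.h>

    /* Hypothetical cleanup callback; @data identifies the resource. */
    static void my_cleanup(void *data)
    {
    	/* undo whatever was set up for @data */
    }

    static int setup_with_cleanup(struct device *dev, void *res)
    {
    	/*
    	 * If registering the action fails, my_cleanup(res) runs before
    	 * the error is returned, so the caller never has to unwind.
    	 */
    	return devm_add_action_or_reset(dev, my_cleanup, res);
    }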
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 148df5ae8ef8..980e3051edd7 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -801,9 +801,8 @@ static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
- struct cluster_pmu *cluster;
struct l2cache_pmu *l2cache_pmu;
- cpumask_t cluster_online_cpus;
+ struct cluster_pmu *cluster;
unsigned int target;
l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
@@ -820,9 +819,8 @@ static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cluster->on_cpu = -1;
/* Any other CPU for this cluster which is still online */
- cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
- cpu_online_mask);
- target = cpumask_any_but(&cluster_online_cpus, cpu);
+ target = cpumask_any_and_but(&cluster->cluster_cpus,
+ cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
disable_irq(cluster->irq);
return 0;
@@ -904,6 +902,7 @@ static int l2_cache_pmu_probe(struct platform_device *pdev)
l2cache_pmu->pmu = (struct pmu) {
/* suffix is instance id for future use with multiple sockets */
.name = "l2cache_0",
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = l2_cache_pmu_enable,
.pmu_disable = l2_cache_pmu_disable,
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index f16783d03db7..37786e88514e 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -748,6 +748,7 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
return -ENOMEM;
l3pmu->pmu = (struct pmu) {
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = qcom_l3_cache__pmu_enable,
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index fa0bccf4edf2..04487ad7fba0 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -136,6 +136,7 @@ static int pmu_legacy_device_probe(struct platform_device *pdev)
pmu = riscv_pmu_alloc();
if (!pmu)
return -ENOMEM;
+ pmu->pmu.parent = &pdev->dev;
pmu_legacy_init(pmu);
return 0;
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 8cbe6e5f9c39..82636273d726 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -1043,7 +1043,6 @@ static struct ctl_table sbi_pmu_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
- { }
};
static int pmu_sbi_device_probe(struct platform_device *pdev)
@@ -1081,6 +1080,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
}
pmu->pmu.attr_groups = riscv_pmu_attr_groups;
+ pmu->pmu.parent = &pdev->dev;
pmu->cmask = cmask;
pmu->ctr_start = pmu_sbi_ctr_start;
pmu->ctr_stop = pmu_sbi_ctr_stop;
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index e16d10c763de..faf763d2c95c 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -504,24 +504,19 @@ static void tx2_uncore_event_update(struct perf_event *event)
static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
{
- int i = 0;
- struct acpi_tx2_pmu_device {
- __u8 id[ACPI_ID_LEN];
- enum tx2_uncore_type type;
- } devices[] = {
+ struct acpi_device_id devices[] = {
{"CAV901D", PMU_TYPE_L3C},
{"CAV901F", PMU_TYPE_DMC},
{"CAV901E", PMU_TYPE_CCPI2},
- {"", PMU_TYPE_INVALID}
+ {}
};
+ const struct acpi_device_id *id;
- while (devices[i].type != PMU_TYPE_INVALID) {
- if (!strcmp(acpi_device_hid(adev), devices[i].id))
- break;
- i++;
- }
+ id = acpi_match_acpi_device(devices, adev);
+ if (!id)
+ return PMU_TYPE_INVALID;
- return devices[i].type;
+ return (enum tx2_uncore_type)id->driver_data;
}
static bool tx2_uncore_validate_event(struct pmu *pmu,
@@ -729,6 +724,7 @@ static int tx2_uncore_pmu_register(
/* Perf event registration */
tx2_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = tx2_pmu->dev,
.attr_groups = tx2_pmu->attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = tx2_uncore_event_init,
@@ -932,9 +928,8 @@ static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
struct hlist_node *hpnode)
{
- int new_cpu;
struct tx2_uncore_pmu *tx2_pmu;
- struct cpumask cpu_online_mask_temp;
+ unsigned int new_cpu;
tx2_pmu = hlist_entry_safe(hpnode,
struct tx2_uncore_pmu, hpnode);
@@ -945,11 +940,8 @@ static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
if (tx2_pmu->hrtimer_callback)
hrtimer_cancel(&tx2_pmu->hrtimer);
- cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
- cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
- new_cpu = cpumask_any_and(
- cpumask_of_node(tx2_pmu->node),
- &cpu_online_mask_temp);
+ new_cpu = cpumask_any_and_but(cpumask_of_node(tx2_pmu->node),
+ cpu_online_mask, cpu);
tx2_pmu->cpu = new_cpu;
if (new_cpu >= nr_cpu_ids)
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 0d49343d704b..8823b4c6b556 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1102,6 +1102,7 @@ static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
/* Perf driver registration */
pmu_dev->pmu = (struct pmu) {
+ .parent = pmu_dev->parent->dev,
.attr_groups = pmu_dev->attr_groups,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = xgene_perf_pmu_enable,
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index b700f52b7b67..11fcb1867118 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -110,8 +110,10 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
/* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
- writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
- imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ if (imx8_phy->drvdata->variant != IMX8MM) {
+ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ }
val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
writel(val | ANA_AUX_RX_TERM_GND_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 41162d7228c9..1d1db1737422 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -603,7 +603,7 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
u16 val;
fix_idx = 0;
- for (addr = 0; addr < 512; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) {
/*
* All PHY register values are defined in full for 3.125Gbps
* SERDES speed. The values required for 1.25 Gbps are almost
@@ -611,11 +611,12 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
* comparison to 3.125 Gbps values. These register values are
* stored in "gbe_phy_init_fix" array.
*/
- if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) {
+ if (!is_1gbps &&
+ fix_idx < ARRAY_SIZE(gbe_phy_init_fix) &&
+ gbe_phy_init_fix[fix_idx].addr == addr) {
/* Use new value */
val = gbe_phy_init_fix[fix_idx].value;
- if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix))
- fix_idx++;
+ fix_idx++;
} else {
val = gbe_phy_init[addr];
}
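The fix above bounds both loops with ARRAY_SIZE() and checks fix_idx before
dereferencing the fixup table, so neither array can be read past its end. A
standalone sketch of the bounds-checked overlay pattern with made-up table
contents:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct fixup { unsigned int addr; unsigned short value; };

    static const unsigned short base_tbl[] = { 0x10, 0x20, 0x30, 0x40 };
    static const struct fixup fix_tbl[] = { { 1, 0x21 }, { 3, 0x41 } };

    int main(void)
    {
    	size_t fix_idx = 0;

    	for (size_t addr = 0; addr < ARRAY_SIZE(base_tbl); addr++) {
    		unsigned short val;

    		/* Only consult the fixup table while entries remain. */
    		if (fix_idx < ARRAY_SIZE(fix_tbl) &&
    		    fix_tbl[fix_idx].addr == addr)
    			val = fix_tbl[fix_idx++].value;
    		else
    			val = base_tbl[addr];
    		printf("reg %zu = 0x%x\n", addr, val);
    	}
    	return 0;
    }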
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index 03fb0d4b75d7..20d4c020a83c 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -297,7 +297,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(qphy->phy),
"failed to create phy\n");
- qphy->vreg = devm_regulator_get(dev, "vdda-phy");
+ qphy->vreg = devm_regulator_get(dev, "vdd");
if (IS_ERR(qphy->vreg))
return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 7d585a4bbbba..c21cdb8dbfe7 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -77,6 +77,7 @@ enum qphy_reg_layout {
QPHY_COM_BIAS_EN_CLKBUFLR_EN,
QPHY_DP_PHY_STATUS,
+ QPHY_DP_PHY_VCO_DIV,
QPHY_TX_TX_POL_INV,
QPHY_TX_TX_DRV_LVL,
@@ -102,6 +103,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V3_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V3_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V3_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V3_TX_TX_DRV_LVL,
@@ -126,6 +128,7 @@ static const unsigned int qmp_v45_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V4_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V4_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V4_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V4_TX_TX_DRV_LVL,
@@ -150,6 +153,7 @@ static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V5_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V5_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V5_5NM_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V5_5NM_TX_TX_DRV_LVL,
@@ -174,6 +178,7 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V6_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V6_TX_TX_DRV_LVL,
@@ -2150,9 +2155,9 @@ static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
if (reverse)
- writel(0x4c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x4c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
else
- writel(0x5c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x5c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
return reverse;
}
@@ -2162,6 +2167,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 phy_vco_div;
unsigned long pixel_freq;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
switch (dp_opts->link_rate) {
case 1620:
@@ -2184,7 +2190,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
/* Other link rates aren't supported */
return -EINVAL;
}
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
+ writel(phy_vco_div, qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_VCO_DIV]);
clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
index f5cfacf9be96..181057421c11 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v5.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V5_H_
/* Only for QMP V5 PHY - DP PHY registers */
+#define QSERDES_V5_DP_PHY_VCO_DIV 0x070
#define QSERDES_V5_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
#define QSERDES_V5_DP_PHY_STATUS 0x0dc
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
index 01a20d3be4b8..fa967a1af058 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-dp-phy-v6.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_DP_PHY_V6_H_
/* Only for QMP V6 PHY - DP PHY registers */
+#define QSERDES_V6_DP_PHY_VCO_DIV 0x070
#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
#define QSERDES_V6_DP_PHY_STATUS 0x0e4
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index a34f67bb7e61..b60a4b60451e 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -87,6 +87,7 @@ config PHY_ROCKCHIP_SAMSUNG_HDPTX
tristate "Rockchip Samsung HDMI/eDP Combo PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
select GENERIC_PHY
+ select RATIONAL
help
Enable this to support the Rockchip HDMI/eDP Combo PHY
with Samsung IP block.
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 76b9cf417591..bf74e429ff46 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -125,12 +125,15 @@ struct rockchip_combphy_grfcfg {
};
struct rockchip_combphy_cfg {
+ unsigned int num_phys;
+ unsigned int phy_ids[3];
const struct rockchip_combphy_grfcfg *grfcfg;
int (*combphy_cfg)(struct rockchip_combphy_priv *priv);
};
struct rockchip_combphy_priv {
u8 type;
+ int id;
void __iomem *mmio;
int num_clks;
struct clk_bulk_data *clks;
@@ -320,7 +323,7 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
struct rockchip_combphy_priv *priv;
const struct rockchip_combphy_cfg *phy_cfg;
struct resource *res;
- int ret;
+ int ret, id;
phy_cfg = of_device_get_match_data(dev);
if (!phy_cfg) {
@@ -338,6 +341,15 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return ret;
}
+ /* Find the PHY id from the I/O address */
+ priv->id = -ENODEV;
+ for (id = 0; id < phy_cfg->num_phys; id++) {
+ if (res->start == phy_cfg->phy_ids[id]) {
+ priv->id = id;
+ break;
+ }
+ }
+
priv->dev = dev;
priv->type = PHY_NONE;
priv->cfg = phy_cfg;
@@ -562,6 +574,12 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfe820000,
+ 0xfe830000,
+ 0xfe840000,
+ },
.grfcfg = &rk3568_combphy_grfcfgs,
.combphy_cfg = rk3568_combphy_cfg,
};
@@ -578,8 +596,14 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ switch (priv->id) {
+ case 1:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
+ break;
+ case 2:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ break;
+ }
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
@@ -736,6 +760,12 @@ static const struct rockchip_combphy_grfcfg rk3588_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfee00000,
+ 0xfee10000,
+ 0xfee20000,
+ },
.grfcfg = &rk3588_combphy_grfcfgs,
.combphy_cfg = rk3588_combphy_cfg,
};
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
index 121e5961ce11..9857ee45b89e 100644
--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -40,6 +40,8 @@
#define RK3588_BIFURCATION_LANE_0_1 BIT(0)
#define RK3588_BIFURCATION_LANE_2_3 BIT(1)
#define RK3588_LANE_AGGREGATION BIT(2)
+#define RK3588_PCIE1LN_SEL_EN (GENMASK(1, 0) << 16)
+#define RK3588_PCIE30_PHY_MODE_EN (GENMASK(2, 0) << 16)
struct rockchip_p3phy_ops;
@@ -132,7 +134,7 @@ static const struct rockchip_p3phy_ops rk3568_ops = {
static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
{
u32 reg = 0;
- u8 mode = 0;
+ u8 mode = RK3588_LANE_AGGREGATION; /* default */
int ret;
/* Deassert PCIe PMA output clamp mode */
@@ -140,31 +142,24 @@ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
/* Set bifurcation if needed */
for (int i = 0; i < priv->num_lanes; i++) {
- if (!priv->lanes[i])
- mode |= (BIT(i) << 3);
-
if (priv->lanes[i] > 1)
- mode |= (BIT(i) >> 1);
- }
-
- if (!mode)
- reg = RK3588_LANE_AGGREGATION;
- else {
- if (mode & (BIT(0) | BIT(1)))
- reg |= RK3588_BIFURCATION_LANE_0_1;
-
- if (mode & (BIT(2) | BIT(3)))
- reg |= RK3588_BIFURCATION_LANE_2_3;
+ mode &= ~RK3588_LANE_AGGREGATION;
+ if (priv->lanes[i] == 3)
+ mode |= RK3588_BIFURCATION_LANE_0_1;
+ if (priv->lanes[i] == 4)
+ mode |= RK3588_BIFURCATION_LANE_2_3;
}
- regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
+ reg = mode;
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0,
+ RK3588_PCIE30_PHY_MODE_EN | reg);
/* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
if (!IS_ERR(priv->pipe_grf)) {
- reg = (mode & (BIT(6) | BIT(7))) >> 6;
+ reg = mode & (RK3588_BIFURCATION_LANE_0_1 | RK3588_BIFURCATION_LANE_2_3);
if (reg)
regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
- (reg << 16) | reg);
+ RK3588_PCIE1LN_SEL_EN | reg);
}
reset_control_deassert(priv->p30phy);
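The new RK3588_*_EN macros encode the Rockchip GRF convention that the upper
16 bits of a write act as a write-enable mask for the lower 16 bits, so
fields outside the mask keep their values. A standalone sketch of composing
such a write (the helper name here is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Rockchip GRF convention: bits [31:16] write-enable bits [15:0]. */
    static uint32_t grf_field(uint32_t mask, uint32_t val)
    {
    	return (mask << 16) | (val & mask);
    }

    int main(void)
    {
    	/* Update only the two pcie1ln_sel bits, leaving the rest intact. */
    	printf("0x%08x\n", grf_field(0x3, 0x2));	/* 0x00030002 */
    	return 0;
    }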
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index 13cd614e12a1..751fecd466e3 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -69,7 +69,6 @@ struct tusb1210 {
struct delayed_work chg_det_work;
struct notifier_block psy_nb;
struct power_supply *psy;
- struct power_supply *charger;
#endif
};
@@ -236,19 +235,24 @@ static const char * const tusb1210_chargers[] = {
static bool tusb1210_get_online(struct tusb1210 *tusb)
{
+ struct power_supply *charger = NULL;
union power_supply_propval val;
- int i;
+ bool online = false;
+ int i, ret;
- for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
- tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
+ for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !charger; i++)
+ charger = power_supply_get_by_name(tusb1210_chargers[i]);
- if (!tusb->charger)
+ if (!charger)
return false;
- if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
- return false;
+ ret = power_supply_get_property(charger, POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret == 0)
+ online = val.intval;
+
+ power_supply_put(charger);
- return val.intval;
+ return online;
}
static void tusb1210_chg_det_work(struct work_struct *work)
@@ -473,9 +477,6 @@ static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
cancel_delayed_work_sync(&tusb->chg_det_work);
power_supply_unregister(tusb->psy);
}
-
- if (tusb->charger)
- power_supply_put(tusb->charger);
}
#else
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
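The tusb1210 change stops caching the power_supply pointer and instead takes
and drops the reference within a single call, the usual get/put ownership
pattern. A kernel-context sketch of that pattern, assuming only the
power_supply API already used in the hunk:

    #include <linux/power_supply.h>

    /* Take a supply reference, read one property, drop the reference. */
    static bool charger_is_online(const char *name)
    {
    	union power_supply_propval val;
    	struct power_supply *psy;
    	bool online = false;

    	psy = power_supply_get_by_name(name);
    	if (!psy)
    		return false;

    	if (!power_supply_get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val))
    		online = val.intval;

    	power_supply_put(psy);
    	return online;
    }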
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index d45657aa986a..7e4f93a3bc7a 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -235,13 +235,13 @@ config PINCTRL_INGENIC
config PINCTRL_K210
bool "Pinctrl driver for the Canaan Kendryte K210 SoC"
- depends on RISCV && SOC_CANAAN && OF
+ depends on RISCV && SOC_CANAAN_K210 && OF
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
select GPIOLIB
select OF_GPIO
select REGMAP_MMIO
- default SOC_CANAAN
+ default SOC_CANAAN_K210
help
Add support for the Canaan Kendryte K210 RISC-V SOC Field
Programmable IO Array (FPIOA) controller.
@@ -450,6 +450,17 @@ config PINCTRL_ROCKCHIP
help
This support pinctrl and GPIO driver for Rockchip SoCs.
+config PINCTRL_SCMI
+ tristate "Pinctrl driver using SCMI protocol interface"
+ depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This driver provides support for pin controllers that are
+ managed by firmware implementing the SCMI interface.
+ It uses the SCMI Message Protocol to interact with the
+ firmware providing all the pinctrl controls.
+
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 2152539b53d5..cc809669405a 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
+obj-$(CONFIG_PINCTRL_SCMI) += pinctrl-scmi.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index d376fa7114d1..029efe16f8cc 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -43,7 +43,7 @@
#define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */
#define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */
#define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */
-#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU630 0x630 /* Disable GPIO Internal Pull-Down #4 */
#define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
#define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
#define SCU690 0x690 /* Multi-function Pin Control #24 */
@@ -2495,38 +2495,38 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
/* GPIOS7 */
- ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+ ASPEED_PULL_DOWN_PINCONF(T24, SCU630, 23),
/* GPIOS6 */
- ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+ ASPEED_PULL_DOWN_PINCONF(P23, SCU630, 22),
/* GPIOS5 */
- ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+ ASPEED_PULL_DOWN_PINCONF(P24, SCU630, 21),
/* GPIOS4 */
- ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+ ASPEED_PULL_DOWN_PINCONF(R26, SCU630, 20),
/* GPIOS3*/
- ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+ ASPEED_PULL_DOWN_PINCONF(R24, SCU630, 19),
/* GPIOS2 */
- ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+ ASPEED_PULL_DOWN_PINCONF(T26, SCU630, 18),
/* GPIOS1 */
- ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+ ASPEED_PULL_DOWN_PINCONF(T25, SCU630, 17),
/* GPIOS0 */
- ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+ ASPEED_PULL_DOWN_PINCONF(R23, SCU630, 16),
/* GPIOR7 */
- ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+ ASPEED_PULL_DOWN_PINCONF(U26, SCU630, 15),
/* GPIOR6 */
- ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+ ASPEED_PULL_DOWN_PINCONF(W26, SCU630, 14),
/* GPIOR5 */
- ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+ ASPEED_PULL_DOWN_PINCONF(T23, SCU630, 13),
/* GPIOR4 */
- ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+ ASPEED_PULL_DOWN_PINCONF(U25, SCU630, 12),
/* GPIOR3*/
- ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+ ASPEED_PULL_DOWN_PINCONF(V26, SCU630, 11),
/* GPIOR2 */
- ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+ ASPEED_PULL_DOWN_PINCONF(V24, SCU630, 10),
/* GPIOR1 */
- ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+ ASPEED_PULL_DOWN_PINCONF(U24, SCU630, 9),
/* GPIOR0 */
- ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+ ASPEED_PULL_DOWN_PINCONF(V25, SCU630, 8),
/* GPIOX7 */
ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 6649357637ff..cffeb869130d 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2124,13 +2124,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev)
error = pinctrl_claim_hogs(pctldev);
if (error) {
- dev_err(pctldev->dev, "could not claim hogs: %i\n",
- error);
- pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
- pctldev->desc->npins);
- mutex_destroy(&pctldev->mutex);
- kfree(pctldev);
-
+ dev_err(pctldev->dev, "could not claim hogs: %i\n", error);
return error;
}
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index df1efc2e5202..6a94ecd6a8de 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -220,14 +220,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
for (state = 0; ; state++) {
/* Retrieve the pinctrl-* property */
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
- if (!propname)
- return -ENOMEM;
+ if (!propname) {
+ ret = -ENOMEM;
+ goto err;
+ }
prop = of_find_property(np, propname, &size);
kfree(propname);
if (!prop) {
if (state == 0) {
- of_node_put(np);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
break;
}
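The devicetree.c hunk funnels every failure through one err label so the
node reference is dropped exactly once, whichever step fails. A standalone
sketch of the goto-unwind idiom, with a toy refcount standing in for
of_node_get()/of_node_put():

    #include <stdio.h>

    static int refs;
    static void node_get(void) { refs++; }
    static void node_put(void) { refs--; }
    static int do_step(int fail) { return fail ? -1 : 0; }

    /* Every failure path funnels through one label that drops the ref. */
    static int setup(int fail)
    {
    	int ret;

    	node_get();
    	ret = do_step(fail);
    	if (ret)
    		goto err;
    	node_put();
    	return 0;
    err:
    	node_put();	/* reference dropped exactly once */
    	return ret;
    }

    int main(void)
    {
    	setup(1);
    	printf("refs after failed setup: %d\n", refs);	/* prints 0 */
    	return 0;
    }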
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index ac97724c59ba..4e87f5b875c0 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -231,6 +231,7 @@ static const unsigned int byt_score_pins_map[BYT_NGPIO_SCORE] = {
/* SCORE groups */
static const unsigned int byt_score_uart1_pins[] = { 70, 71, 72, 73 };
static const unsigned int byt_score_uart2_pins[] = { 74, 75, 76, 77 };
+static const unsigned int byt_score_uart3_pins[] = { 57, 61 };
static const unsigned int byt_score_pwm0_pins[] = { 94 };
static const unsigned int byt_score_pwm1_pins[] = { 95 };
@@ -278,37 +279,38 @@ static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
static const struct intel_pingroup byt_score_groups[] = {
- PIN_GROUP("uart1_grp", byt_score_uart1_pins, 1),
- PIN_GROUP("uart2_grp", byt_score_uart2_pins, 1),
- PIN_GROUP("pwm0_grp", byt_score_pwm0_pins, 1),
- PIN_GROUP("pwm1_grp", byt_score_pwm1_pins, 1),
- PIN_GROUP("ssp2_grp", byt_score_ssp2_pins, 1),
- PIN_GROUP("sio_spi_grp", byt_score_sio_spi_pins, 1),
- PIN_GROUP("i2c5_grp", byt_score_i2c5_pins, 1),
- PIN_GROUP("i2c6_grp", byt_score_i2c6_pins, 1),
- PIN_GROUP("i2c4_grp", byt_score_i2c4_pins, 1),
- PIN_GROUP("i2c3_grp", byt_score_i2c3_pins, 1),
- PIN_GROUP("i2c2_grp", byt_score_i2c2_pins, 1),
- PIN_GROUP("i2c1_grp", byt_score_i2c1_pins, 1),
- PIN_GROUP("i2c0_grp", byt_score_i2c0_pins, 1),
- PIN_GROUP("ssp0_grp", byt_score_ssp0_pins, 1),
- PIN_GROUP("ssp1_grp", byt_score_ssp1_pins, 1),
- PIN_GROUP("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
- PIN_GROUP("sdio_grp", byt_score_sdio_pins, 1),
- PIN_GROUP("emmc_grp", byt_score_emmc_pins, 1),
- PIN_GROUP("lpc_grp", byt_score_ilb_lpc_pins, 1),
- PIN_GROUP("sata_grp", byt_score_sata_pins, 1),
- PIN_GROUP("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
- PIN_GROUP("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
- PIN_GROUP("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
- PIN_GROUP("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
- PIN_GROUP("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
- PIN_GROUP("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
- PIN_GROUP("smbus_grp", byt_score_smbus_pins, 1),
+ PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1),
+ PIN_GROUP_GPIO("uart2_grp", byt_score_uart2_pins, 1),
+ PIN_GROUP_GPIO("uart3_grp", byt_score_uart3_pins, 1),
+ PIN_GROUP_GPIO("pwm0_grp", byt_score_pwm0_pins, 1),
+ PIN_GROUP_GPIO("pwm1_grp", byt_score_pwm1_pins, 1),
+ PIN_GROUP_GPIO("ssp2_grp", byt_score_ssp2_pins, 1),
+ PIN_GROUP_GPIO("sio_spi_grp", byt_score_sio_spi_pins, 1),
+ PIN_GROUP_GPIO("i2c5_grp", byt_score_i2c5_pins, 1),
+ PIN_GROUP_GPIO("i2c6_grp", byt_score_i2c6_pins, 1),
+ PIN_GROUP_GPIO("i2c4_grp", byt_score_i2c4_pins, 1),
+ PIN_GROUP_GPIO("i2c3_grp", byt_score_i2c3_pins, 1),
+ PIN_GROUP_GPIO("i2c2_grp", byt_score_i2c2_pins, 1),
+ PIN_GROUP_GPIO("i2c1_grp", byt_score_i2c1_pins, 1),
+ PIN_GROUP_GPIO("i2c0_grp", byt_score_i2c0_pins, 1),
+ PIN_GROUP_GPIO("ssp0_grp", byt_score_ssp0_pins, 1),
+ PIN_GROUP_GPIO("ssp1_grp", byt_score_ssp1_pins, 1),
+ PIN_GROUP_GPIO("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
+ PIN_GROUP_GPIO("sdio_grp", byt_score_sdio_pins, 1),
+ PIN_GROUP_GPIO("emmc_grp", byt_score_emmc_pins, 1),
+ PIN_GROUP_GPIO("lpc_grp", byt_score_ilb_lpc_pins, 1),
+ PIN_GROUP_GPIO("sata_grp", byt_score_sata_pins, 1),
+ PIN_GROUP_GPIO("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
+ PIN_GROUP_GPIO("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
+ PIN_GROUP_GPIO("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
+ PIN_GROUP_GPIO("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
+ PIN_GROUP_GPIO("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
+ PIN_GROUP_GPIO("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
+ PIN_GROUP_GPIO("smbus_grp", byt_score_smbus_pins, 1),
};
static const char * const byt_score_uart_groups[] = {
- "uart1_grp", "uart2_grp",
+ "uart1_grp", "uart2_grp", "uart3_grp",
};
static const char * const byt_score_pwm_groups[] = {
"pwm0_grp", "pwm1_grp",
@@ -332,12 +334,14 @@ static const char * const byt_score_plt_clk_groups[] = {
};
static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
static const char * const byt_score_gpio_groups[] = {
- "uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
- "ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
- "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
- "sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
- "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
- "plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
+ "uart1_grp_gpio", "uart2_grp_gpio", "uart3_grp_gpio", "pwm0_grp_gpio",
+ "pwm1_grp_gpio", "ssp0_grp_gpio", "ssp1_grp_gpio", "ssp2_grp_gpio",
+ "sio_spi_grp_gpio", "i2c0_grp_gpio", "i2c1_grp_gpio", "i2c2_grp_gpio",
+ "i2c3_grp_gpio", "i2c4_grp_gpio", "i2c5_grp_gpio", "i2c6_grp_gpio",
+ "sdcard_grp_gpio", "sdio_grp_gpio", "emmc_grp_gpio", "lpc_grp_gpio",
+ "sata_grp_gpio", "plt_clk0_grp_gpio", "plt_clk1_grp_gpio",
+ "plt_clk2_grp_gpio", "plt_clk3_grp_gpio", "plt_clk4_grp_gpio",
+ "plt_clk5_grp_gpio", "smbus_grp_gpio",
};
static const struct intel_function byt_score_functions[] = {
@@ -456,8 +460,8 @@ static const struct intel_pingroup byt_sus_groups[] = {
PIN_GROUP("usb_oc_grp_gpio", byt_sus_usb_over_current_pins, byt_sus_usb_over_current_gpio_mode_values),
PIN_GROUP("usb_ulpi_grp_gpio", byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_gpio_mode_values),
PIN_GROUP("pcu_spi_grp_gpio", byt_sus_pcu_spi_pins, byt_sus_pcu_spi_gpio_mode_values),
- PIN_GROUP("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
- PIN_GROUP("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
};
static const char * const byt_sus_usb_groups[] = {
@@ -469,7 +473,7 @@ static const char * const byt_sus_pmu_clk_groups[] = {
};
static const char * const byt_sus_gpio_groups[] = {
"usb_oc_grp_gpio", "usb_ulpi_grp_gpio", "pcu_spi_grp_gpio",
- "pmu_clk1_grp", "pmu_clk2_grp",
+ "pmu_clk1_grp_gpio", "pmu_clk2_grp_gpio",
};
static const struct intel_function byt_sus_functions[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index fde65e18cd14..6981e2fab93f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -179,6 +179,10 @@ struct intel_community {
.modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
+#define PIN_GROUP_GPIO(n, p, m) \
+ PIN_GROUP(n, p, m), \
+ PIN_GROUP(n "_gpio", p, 0)
+
#define FUNCTION(n, g) \
{ \
.func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)), \
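The PIN_GROUP_GPIO() helper added above expands to two group entries: the
native-mode group plus a "_gpio" variant with mode 0, which is what the
renamed byt_score_gpio_groups[] strings refer to. For example, the first
Baytrail entry expands as:

    /* PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1) becomes: */
    PIN_GROUP("uart1_grp",      byt_score_uart1_pins, 1),  /* native mode */
    PIN_GROUP("uart1_grp_gpio", byt_score_uart1_pins, 0),  /* GPIO mode   */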
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index b6bc31abd2b0..b19bc391705e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -165,20 +165,21 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
break;
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_IES, &ret);
+ if (!ret)
+ err = -EINVAL;
+ break;
+ case PIN_CONFIG_OUTPUT:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
break;
- /* CONFIG Current direction return value
- * ------------- ----------------- ----------------------
- * OUTPUT_ENABLE output 1 (= HW value)
- * input 0 (= HW value)
- * INPUT_ENABLE output 0 (= reverse HW value)
- * input 1 (= reverse HW value)
- */
- if (param == PIN_CONFIG_INPUT_ENABLE)
- ret = !ret;
+ if (!ret) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DO, &ret);
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
@@ -193,6 +194,8 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
}
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
+ if (!ret)
+ err = -EINVAL;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
if (!hw->soc->drive_get)
@@ -281,26 +284,9 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
err = hw->soc->bias_set_combo(hw, desc, 0, arg);
break;
- case PIN_CONFIG_OUTPUT_ENABLE:
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
- MTK_DISABLE);
- /* Keep set direction to consider the case that a GPIO pin
- * does not have SMT control
- */
- if (err != -ENOTSUPP)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_OUTPUT);
- break;
case PIN_CONFIG_INPUT_ENABLE:
 /* regard any non-zero value as enable */
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
- if (err)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_INPUT);
break;
case PIN_CONFIG_SLEW_RATE:
 /* regard any non-zero value as enable */
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
index 79f5d753d7e1..50a87d9618a8 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -250,7 +250,7 @@ static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
-static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
+static const unsigned int pdm_dclk_a_pins[] = { GPIOA_9 };
/* gen_clk */
static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
@@ -591,7 +591,7 @@ static struct meson_pmx_group meson_a1_periphs_groups[] = {
GROUP(pdm_din2_a, 3),
GROUP(pdm_din1_a, 3),
GROUP(pdm_din0_a, 3),
- GROUP(pdm_dclk, 3),
+ GROUP(pdm_dclk_a, 3),
GROUP(pwm_c_a, 3),
GROUP(pwm_b_a, 3),
@@ -755,7 +755,7 @@ static const char * const spi_a_groups[] = {
static const char * const pdm_groups[] = {
"pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
- "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+ "pdm_din1_a", "pdm_din0_a", "pdm_dclk_a",
};
static const char * const gen_clk_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-scmi.c b/drivers/pinctrl/pinctrl-scmi.c
new file mode 100644
index 000000000000..036bc1e3fc6c
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-scmi.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Power Interface (SCMI) Protocol based pinctrl driver
+ *
+ * Copyright (C) 2024 EPAM
+ * Copyright 2024 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-utils.h"
+#include "core.h"
+#include "pinconf.h"
+
+#define DRV_NAME "scmi-pinctrl"
+
+/* Number of configs: if not larger than 4 use the stack, else use kcalloc() */
+#define SCMI_NUM_CONFIGS 4
+
+static const struct scmi_pinctrl_proto_ops *pinctrl_ops;
+
+struct scmi_pinctrl {
+ struct device *dev;
+ struct scmi_protocol_handle *ph;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pctl_desc;
+ struct pinfunction *functions;
+ unsigned int nr_functions;
+ struct pinctrl_pin_desc *pins;
+ unsigned int nr_pins;
+};
+
+static int pinctrl_scmi_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->count_get(pmx->ph, GROUP_TYPE);
+}
+
+static const char *pinctrl_scmi_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ int ret;
+ const char *name;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ ret = pinctrl_ops->name_get(pmx->ph, selector, GROUP_TYPE, &name);
+ if (ret) {
+ dev_err(pmx->dev, "get name failed with err %d\n", ret);
+ return NULL;
+ }
+
+ return name;
+}
+
+static int pinctrl_scmi_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->group_pins_get(pmx->ph, selector, pins, num_pins);
+}
+
+static const struct pinctrl_ops pinctrl_scmi_pinctrl_ops = {
+ .get_groups_count = pinctrl_scmi_get_groups_count,
+ .get_group_name = pinctrl_scmi_get_group_name,
+ .get_group_pins = pinctrl_scmi_get_group_pins,
+#ifdef CONFIG_OF
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+#endif
+};
+
+static int pinctrl_scmi_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->count_get(pmx->ph, FUNCTION_TYPE);
+}
+
+static const char *pinctrl_scmi_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ int ret;
+ const char *name;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ ret = pinctrl_ops->name_get(pmx->ph, selector, FUNCTION_TYPE, &name);
+ if (ret) {
+ dev_err(pmx->dev, "get name failed with err %d\n", ret);
+ return NULL;
+ }
+
+ return name;
+}
+
+static int pinctrl_scmi_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **p_groups,
+ unsigned int * const p_num_groups)
+{
+ struct pinfunction *func;
+ const unsigned int *group_ids;
+ unsigned int num_groups;
+ const char **groups;
+ int ret, i;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ if (!p_groups || !p_num_groups)
+ return -EINVAL;
+
+ if (selector >= pmx->nr_functions)
+ return -EINVAL;
+
+ func = &pmx->functions[selector];
+ if (func->ngroups)
+ goto done;
+
+ ret = pinctrl_ops->function_groups_get(pmx->ph, selector, &num_groups,
+ &group_ids);
+ if (ret) {
+ dev_err(pmx->dev, "Unable to get function groups, err %d\n", ret);
+ return ret;
+ }
+ if (!num_groups)
+ return -EINVAL;
+
+ groups = kcalloc(num_groups, sizeof(*groups), GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (i = 0; i < num_groups; i++) {
+ groups[i] = pinctrl_scmi_get_group_name(pctldev, group_ids[i]);
+ if (!groups[i]) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+ }
+
+ func->ngroups = num_groups;
+ func->groups = groups;
+done:
+ *p_groups = func->groups;
+ *p_num_groups = func->ngroups;
+
+ return 0;
+
+err_free:
+ kfree(groups);
+
+ return ret;
+}
+
+static int pinctrl_scmi_func_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->mux_set(pmx->ph, selector, group);
+}
+
+static int pinctrl_scmi_request(struct pinctrl_dev *pctldev,
+ unsigned int offset)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->pin_request(pmx->ph, offset);
+}
+
+static int pinctrl_scmi_free(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl_ops->pin_free(pmx->ph, offset);
+}
+
+static const struct pinmux_ops pinctrl_scmi_pinmux_ops = {
+ .request = pinctrl_scmi_request,
+ .free = pinctrl_scmi_free,
+ .get_functions_count = pinctrl_scmi_get_functions_count,
+ .get_function_name = pinctrl_scmi_get_function_name,
+ .get_function_groups = pinctrl_scmi_get_function_groups,
+ .set_mux = pinctrl_scmi_func_set_mux,
+};
+
+static int pinctrl_scmi_map_pinconf_type(enum pin_config_param param,
+ enum scmi_pinctrl_conf_type *type)
+{
+ u32 arg = param;
+
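+ /*
+ * Switch on a u32 copy of the parameter: the OEM cases below are
+ * scmi_pinctrl_conf_type values outside enum pin_config_param, so
+ * comparing them through the enum type would be out of range.
+ */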
+ switch (arg) {
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ *type = SCMI_PIN_BIAS_BUS_HOLD;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ *type = SCMI_PIN_BIAS_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ *type = SCMI_PIN_BIAS_HIGH_IMPEDANCE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *type = SCMI_PIN_BIAS_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ *type = SCMI_PIN_BIAS_PULL_DEFAULT;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ *type = SCMI_PIN_BIAS_PULL_UP;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ *type = SCMI_PIN_DRIVE_OPEN_DRAIN;
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ *type = SCMI_PIN_DRIVE_OPEN_SOURCE;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ *type = SCMI_PIN_DRIVE_PUSH_PULL;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ *type = SCMI_PIN_DRIVE_STRENGTH;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH_UA:
+ *type = SCMI_PIN_DRIVE_STRENGTH;
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ *type = SCMI_PIN_INPUT_DEBOUNCE;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ *type = SCMI_PIN_INPUT_MODE;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT:
+ *type = SCMI_PIN_INPUT_SCHMITT;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ *type = SCMI_PIN_INPUT_MODE;
+ break;
+ case PIN_CONFIG_MODE_LOW_POWER:
+ *type = SCMI_PIN_LOW_POWER_MODE;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ *type = SCMI_PIN_OUTPUT_VALUE;
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ *type = SCMI_PIN_OUTPUT_MODE;
+ break;
+ case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS:
+ *type = SCMI_PIN_OUTPUT_VALUE;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ *type = SCMI_PIN_POWER_SOURCE;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ *type = SCMI_PIN_SLEW_RATE;
+ break;
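+ /*
+ * OEM-specific config types are forwarded to the firmware
+ * unmodified; anything else is unsupported.
+ */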
+ case SCMI_PIN_OEM_START ... SCMI_PIN_OEM_END:
+ *type = arg;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int pinctrl_scmi_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ int ret;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param config_type;
+ enum scmi_pinctrl_conf_type type;
+ u32 config_value;
+
+ if (!config)
+ return -EINVAL;
+
+ config_type = pinconf_to_config_param(*config);
+
+ ret = pinctrl_scmi_map_pinconf_type(config_type, &type);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_ops->settings_get_one(pmx->ph, pin, PIN_TYPE, type,
+ &config_value);
+ /* Convert SCMI error code to PINCTRL expected error code */
+ if (ret == -EOPNOTSUPP)
+ return -ENOTSUPP;
+ if (ret)
+ return ret;
+
+ *config = pinconf_to_config_packed(config_type, config_value);
+
+ return 0;
+}
+
+static int
+pinctrl_scmi_alloc_configs(struct pinctrl_dev *pctldev, u32 num_configs,
+ enum scmi_pinctrl_conf_type **p_config_type,
+ u32 **p_config_value)
+{
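+ /*
+ * Requests of up to SCMI_NUM_CONFIGS entries are served from the
+ * caller's on-stack arrays; only larger requests need heap buffers.
+ */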
+ if (num_configs <= SCMI_NUM_CONFIGS)
+ return 0;
+
+ *p_config_value = kcalloc(num_configs, sizeof(**p_config_value), GFP_KERNEL);
+ if (!*p_config_value)
+ return -ENOMEM;
+
+ *p_config_type = kcalloc(num_configs, sizeof(**p_config_type), GFP_KERNEL);
+ if (!*p_config_type) {
+ kfree(*p_config_value);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+pinctrl_scmi_free_configs(struct pinctrl_dev *pctldev, u32 num_configs,
+ enum scmi_pinctrl_conf_type **p_config_type,
+ u32 **p_config_value)
+{
+ if (num_configs <= SCMI_NUM_CONFIGS)
+ return;
+
+ kfree(*p_config_value);
+ kfree(*p_config_type);
+}
+
+static int pinctrl_scmi_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum scmi_pinctrl_conf_type config_type[SCMI_NUM_CONFIGS];
+ u32 config_value[SCMI_NUM_CONFIGS];
+ enum scmi_pinctrl_conf_type *p_config_type = config_type;
+ u32 *p_config_value = config_value;
+ enum pin_config_param param;
+
+ if (!configs || !num_configs)
+ return -EINVAL;
+
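+ /*
+ * p_config_type and p_config_value start out pointing at the
+ * on-stack arrays; pinctrl_scmi_alloc_configs() repoints them at
+ * heap buffers when num_configs exceeds SCMI_NUM_CONFIGS.
+ */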
+ ret = pinctrl_scmi_alloc_configs(pctldev, num_configs, &p_config_type,
+ &p_config_value);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ ret = pinctrl_scmi_map_pinconf_type(param, &p_config_type[i]);
+ if (ret) {
+ dev_err(pmx->dev, "Error map pinconf_type %d\n", ret);
+ goto free_config;
+ }
+ p_config_value[i] = pinconf_to_config_argument(configs[i]);
+ }
+
+ ret = pinctrl_ops->settings_conf(pmx->ph, pin, PIN_TYPE, num_configs,
+ p_config_type, p_config_value);
+ if (ret)
+ dev_err(pmx->dev, "Error parsing config %d\n", ret);
+
+free_config:
+ pinctrl_scmi_free_configs(pctldev, num_configs, &p_config_type,
+ &p_config_value);
+ return ret;
+}
+
+static int pinctrl_scmi_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum scmi_pinctrl_conf_type config_type[SCMI_NUM_CONFIGS];
+ u32 config_value[SCMI_NUM_CONFIGS];
+ enum scmi_pinctrl_conf_type *p_config_type = config_type;
+ u32 *p_config_value = config_value;
+ enum pin_config_param param;
+
+ if (!configs || !num_configs)
+ return -EINVAL;
+
+ ret = pinctrl_scmi_alloc_configs(pctldev, num_configs, &p_config_type,
+ &p_config_value);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ ret = pinctrl_scmi_map_pinconf_type(param, &p_config_type[i]);
+ if (ret) {
+ dev_err(pmx->dev, "Error map pinconf_type %d\n", ret);
+ goto free_config;
+ }
+
+ p_config_value[i] = pinconf_to_config_argument(configs[i]);
+ }
+
+ ret = pinctrl_ops->settings_conf(pmx->ph, group, GROUP_TYPE,
+ num_configs, p_config_type,
+ p_config_value);
+ if (ret)
+ dev_err(pmx->dev, "Error parsing config %d", ret);
+
+free_config:
+ pinctrl_scmi_free_configs(pctldev, num_configs, &p_config_type,
+ &p_config_value);
+ return ret;
+}
+
+static int pinctrl_scmi_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ int ret;
+ struct scmi_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param config_type;
+ enum scmi_pinctrl_conf_type type;
+ u32 config_value;
+
+ if (!config)
+ return -EINVAL;
+
+ config_type = pinconf_to_config_param(*config);
+ ret = pinctrl_scmi_map_pinconf_type(config_type, &type);
+ if (ret) {
+ dev_err(pmx->dev, "Error map pinconf_type %d\n", ret);
+ return ret;
+ }
+
+ ret = pinctrl_ops->settings_get_one(pmx->ph, group, GROUP_TYPE, type,
+ &config_value);
+ /* Convert SCMI error code to PINCTRL expected error code */
+ if (ret == -EOPNOTSUPP)
+ return -ENOTSUPP;
+ if (ret)
+ return ret;
+
+ *config = pinconf_to_config_packed(config_type, config_value);
+
+ return 0;
+}
+
+static const struct pinconf_ops pinctrl_scmi_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = pinctrl_scmi_pinconf_get,
+ .pin_config_set = pinctrl_scmi_pinconf_set,
+ .pin_config_group_set = pinctrl_scmi_pinconf_group_set,
+ .pin_config_group_get = pinctrl_scmi_pinconf_group_get,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static int pinctrl_scmi_get_pins(struct scmi_pinctrl *pmx,
+ struct pinctrl_desc *desc)
+{
+ struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ int ret, i;
+
+ npins = pinctrl_ops->count_get(pmx->ph, PIN_TYPE);
+ /*
+ * npins can never be zero here: the SCMI pinctrl protocol driver
+ * has already bailed out if the firmware reports no pins.
+ */
+ pins = devm_kmalloc_array(pmx->dev, npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < npins; i++) {
+ pins[i].number = i;
+ /*
+ * The memory backing the name is owned by the SCMI firmware
+ * driver, so there is no need to free it here.
+ */
+ ret = pinctrl_ops->name_get(pmx->ph, i, PIN_TYPE, &pins[i].name);
+ if (ret)
+ return dev_err_probe(pmx->dev, ret,
+ "Can't get name for pin %d", i);
+ }
+
+ desc->npins = npins;
+ desc->pins = pins;
+ dev_dbg(pmx->dev, "got pins %u", npins);
+
+ return 0;
+}
+
+static int scmi_pinctrl_probe(struct scmi_device *sdev)
+{
+ int ret;
+ struct device *dev = &sdev->dev;
+ struct scmi_pinctrl *pmx;
+ const struct scmi_handle *handle;
+ struct scmi_protocol_handle *ph;
+
+ if (!sdev->handle)
+ return -EINVAL;
+
+ handle = sdev->handle;
+
+ pinctrl_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PINCTRL, &ph);
+ if (IS_ERR(pinctrl_ops))
+ return PTR_ERR(pinctrl_ops);
+
+ pmx = devm_kzalloc(dev, sizeof(*pmx), GFP_KERNEL);
+ if (!pmx)
+ return -ENOMEM;
+
+ pmx->ph = ph;
+
+ pmx->dev = dev;
+ pmx->pctl_desc.name = DRV_NAME;
+ pmx->pctl_desc.owner = THIS_MODULE;
+ pmx->pctl_desc.pctlops = &pinctrl_scmi_pinctrl_ops;
+ pmx->pctl_desc.pmxops = &pinctrl_scmi_pinmux_ops;
+ pmx->pctl_desc.confops = &pinctrl_scmi_pinconf_ops;
+
+ ret = pinctrl_scmi_get_pins(pmx, &pmx->pctl_desc);
+ if (ret)
+ return ret;
+
+ ret = devm_pinctrl_register_and_init(dev, &pmx->pctl_desc, pmx,
+ &pmx->pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register pinctrl\n");
+
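+ /*
+ * The function count is only queryable through the registered
+ * pctldev, so size the function-group cache here, before the
+ * controller is enabled.
+ */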
+ pmx->nr_functions = pinctrl_scmi_get_functions_count(pmx->pctldev);
+ pmx->functions = devm_kcalloc(dev, pmx->nr_functions,
+ sizeof(*pmx->functions), GFP_KERNEL);
+ if (!pmx->functions)
+ return -ENOMEM;
+
+ return pinctrl_enable(pmx->pctldev);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_PINCTRL, "pinctrl" },
+ { }
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_pinctrl_driver = {
+ .name = DRV_NAME,
+ .probe = scmi_pinctrl_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_pinctrl_driver);
+
+MODULE_AUTHOR("Oleksii Moisieiev <oleksii_moisieiev@epam.com>");
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("ARM SCMI pin controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index eb5a8c654260..20425afc6b33 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -2045,7 +2045,9 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
for (unsigned int i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
struct irq_data *data;
+ unsigned long flags;
unsigned int virq;
+ int ret;
if (!pctrl->hwirq[i])
continue;
@@ -2063,8 +2065,18 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
continue;
}
- if (!irqd_irq_disabled(data))
+ /*
+ * This has to be atomically executed to protect against a concurrent
+ * interrupt.
+ */
+ raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
+ ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
+ if (!ret && !irqd_irq_disabled(data))
rzg2l_gpio_irq_enable(data);
+ raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
+
+ if (ret)
+ dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
}
}
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 7a83346bfa53..073616b5b5a0 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -132,6 +132,7 @@ config CROS_EC_UART
config CROS_EC_LPC
tristate "ChromeOS Embedded Controller (LPC)"
depends on CROS_EC && ACPI && (X86 || COMPILE_TEST)
+ depends on HAS_IOPORT
help
If you say Y here, you get support for talking to the ChromeOS EC
over an LPC bus, including the LPC Microchip EC (MEC) variant.
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index badc68bbae8c..47d19f7e295a 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -432,6 +432,12 @@ static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev)
void cros_ec_resume_complete(struct cros_ec_device *ec_dev)
{
cros_ec_send_resume_event(ec_dev);
+
+ /*
+ * Let the mfd devices know about events that occur during
+ * suspend. This way the clients know what to do with them.
+ */
+ cros_ec_report_events_during_suspend(ec_dev);
}
EXPORT_SYMBOL(cros_ec_resume_complete);
@@ -442,12 +448,6 @@ static void cros_ec_enable_irq(struct cros_ec_device *ec_dev)
if (ec_dev->wake_enabled)
disable_irq_wake(ec_dev->irq);
-
- /*
- * Let the mfd devices know about events that occur during
- * suspend. This way the clients know what to do with them.
- */
- cros_ec_report_events_during_suspend(ec_dev);
}
/**
@@ -475,8 +475,8 @@ EXPORT_SYMBOL(cros_ec_resume_early);
*/
int cros_ec_resume(struct cros_ec_device *ec_dev)
{
- cros_ec_enable_irq(ec_dev);
- cros_ec_send_resume_event(ec_dev);
+ cros_ec_resume_early(ec_dev);
+ cros_ec_resume_complete(ec_dev);
return 0;
}
EXPORT_SYMBOL(cros_ec_resume);
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index 81950bb2c6da..7f034ead7ae4 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/platform_data/cros_ec_chardev.h>
@@ -403,17 +404,23 @@ static void cros_ec_chardev_remove(struct platform_device *pdev)
misc_deregister(&data->misc);
}
+static const struct platform_device_id cros_ec_chardev_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_chardev_id);
+
static struct platform_driver cros_ec_chardev_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_ec_chardev_probe,
.remove_new = cros_ec_chardev_remove,
+ .id_table = cros_ec_chardev_id,
};
module_platform_driver(cros_ec_chardev_driver);
-MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
MODULE_DESCRIPTION("ChromeOS EC Miscellaneous Character Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 6bf6f0e7b597..e1d313246beb 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -7,6 +7,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_data/cros_ec_commands.h>
@@ -564,6 +565,12 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cros_ec_debugfs_pm_ops,
cros_ec_debugfs_suspend, cros_ec_debugfs_resume);
+static const struct platform_device_id cros_ec_debugfs_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_debugfs_id);
+
static struct platform_driver cros_ec_debugfs_driver = {
.driver = {
.name = DRV_NAME,
@@ -572,10 +579,10 @@ static struct platform_driver cros_ec_debugfs_driver = {
},
.probe = cros_ec_debugfs_probe,
.remove_new = cros_ec_debugfs_remove,
+ .id_table = cros_ec_debugfs_id,
};
module_platform_driver(cros_ec_debugfs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Debug logs for ChromeOS EC");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index 6677cc6c4984..1e69f61115a4 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -9,6 +9,7 @@
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/kstrtox.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -594,6 +595,12 @@ static int __maybe_unused cros_ec_lightbar_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(cros_ec_lightbar_pm_ops,
cros_ec_lightbar_suspend, cros_ec_lightbar_resume);
+static const struct platform_device_id cros_ec_lightbar_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_lightbar_id);
+
static struct platform_driver cros_ec_lightbar_driver = {
.driver = {
.name = DRV_NAME,
@@ -602,10 +609,10 @@ static struct platform_driver cros_ec_lightbar_driver = {
},
.probe = cros_ec_lightbar_probe,
.remove_new = cros_ec_lightbar_remove,
+ .id_table = cros_ec_lightbar_id,
};
module_platform_driver(cros_ec_lightbar_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Expose the Chromebook Pixel's lightbar to userspace");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index f0f3d3d56157..ddfbfec44f4c 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -34,6 +34,32 @@
/* True if ACPI device is present */
static bool cros_ec_lpc_acpi_device_found;
+/*
+ * Indicates that lpc_driver_data.quirk_mmio_memory_base should
+ * be used as the base port for EC mapped memory.
+ */
+#define CROS_EC_LPC_QUIRK_REMAP_MEMORY BIT(0)
+
+/**
+ * struct lpc_driver_data - driver data attached to a DMI device ID to indicate
+ * hardware quirks.
+ * @quirks: a bitfield composed of quirks from CROS_EC_LPC_QUIRK_*
+ * @quirk_mmio_memory_base: The first I/O port addressing EC mapped memory (used
+ * when quirk ...REMAP_MEMORY is set).
+ */
+struct lpc_driver_data {
+ u32 quirks;
+ u16 quirk_mmio_memory_base;
+};
+
+/**
+ * struct cros_ec_lpc - LPC device-specific data
+ * @mmio_memory_base: The first I/O port addressing EC mapped memory.
+ */
+struct cros_ec_lpc {
+ u16 mmio_memory_base;
+};
+
/**
* struct lpc_driver_ops - LPC driver operations
* @read: Copy length bytes from EC address offset into buffer dest. Returns
@@ -290,6 +316,7 @@ done:
static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
unsigned int bytes, void *dest)
{
+ struct cros_ec_lpc *ec_lpc = ec->priv;
int i = offset;
char *s = dest;
int cnt = 0;
@@ -299,13 +326,13 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
/* fixed length */
if (bytes) {
- cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + offset, bytes, s);
+ cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + offset, bytes, s);
return bytes;
}
/* string */
for (; i < EC_MEMMAP_SIZE; i++, s++) {
- cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + i, 1, s);
+ cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + i, 1, s);
cnt++;
if (!*s)
break;
@@ -353,8 +380,28 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
struct acpi_device *adev;
acpi_status status;
struct cros_ec_device *ec_dev;
+ struct cros_ec_lpc *ec_lpc;
+ struct lpc_driver_data *driver_data;
u8 buf[2] = {};
int irq, ret;
+ u32 quirks;
+
+ ec_lpc = devm_kzalloc(dev, sizeof(*ec_lpc), GFP_KERNEL);
+ if (!ec_lpc)
+ return -ENOMEM;
+
+ ec_lpc->mmio_memory_base = EC_LPC_ADDR_MEMMAP;
+
+ driver_data = platform_get_drvdata(pdev);
+ if (driver_data) {
+ quirks = driver_data->quirks;
+
+ if (quirks)
+ dev_info(dev, "loaded with quirks %8.08x\n", quirks);
+
+ if (quirks & CROS_EC_LPC_QUIRK_REMAP_MEMORY)
+ ec_lpc->mmio_memory_base = driver_data->quirk_mmio_memory_base;
+ }
/*
* The Framework Laptop (and possibly other non-ChromeOS devices)
@@ -380,7 +427,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
cros_ec_lpc_ops.write = cros_ec_lpc_mec_write_bytes;
cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
if (buf[0] != 'E' || buf[1] != 'C') {
- if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE,
+ if (!devm_request_region(dev, ec_lpc->mmio_memory_base, EC_MEMMAP_SIZE,
dev_name(dev))) {
dev_err(dev, "couldn't reserve memmap region\n");
return -EBUSY;
@@ -389,7 +436,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
/* Re-assign read/write operations for the non MEC variant */
cros_ec_lpc_ops.read = cros_ec_lpc_read_bytes;
cros_ec_lpc_ops.write = cros_ec_lpc_write_bytes;
- cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2,
+ cros_ec_lpc_ops.read(ec_lpc->mmio_memory_base + EC_MEMMAP_ID, 2,
buf);
if (buf[0] != 'E' || buf[1] != 'C') {
dev_err(dev, "EC ID not detected\n");
@@ -423,6 +470,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
ec_dev->din_size = sizeof(struct ec_host_response) +
sizeof(struct ec_response_get_protocol_info);
ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_dev->priv = ec_lpc;
/*
* Some boards do not have an IRQ allotted for cros_ec_lpc,
@@ -479,6 +527,11 @@ static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
+static const struct lpc_driver_data framework_laptop_amd_lpc_driver_data __initconst = {
+ .quirks = CROS_EC_LPC_QUIRK_REMAP_MEMORY,
+ .quirk_mmio_memory_base = 0xE00,
+};
+
static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
{
/*
@@ -533,7 +586,16 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
},
/* A small number of non-Chromebook/box machines also use the ChromeOS EC */
{
- /* the Framework Laptop */
+ /* the Framework Laptop 13 (AMD Ryzen) and 16 (AMD Ryzen) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMD Ryzen"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Laptop"),
+ },
+ .driver_data = (void *)&framework_laptop_amd_lpc_driver_data,
+ },
+ {
+ /* the Framework Laptop (Intel 11th, 12th, 13th Generation) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
DMI_MATCH(DMI_PRODUCT_NAME, "Laptop"),
@@ -610,14 +672,16 @@ static int __init cros_ec_lpc_init(void)
{
int ret;
acpi_status status;
+ const struct dmi_system_id *dmi_match;
status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
&cros_ec_lpc_acpi_device_found, NULL);
if (ACPI_FAILURE(status))
pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
- if (!cros_ec_lpc_acpi_device_found &&
- !dmi_check_system(cros_ec_lpc_dmi_table)) {
+ dmi_match = dmi_first_match(cros_ec_lpc_dmi_table);
+
+ if (!cros_ec_lpc_acpi_device_found && !dmi_match) {
pr_err(DRV_NAME ": unsupported system.\n");
return -ENODEV;
}
@@ -630,6 +694,9 @@ static int __init cros_ec_lpc_init(void)
}
if (!cros_ec_lpc_acpi_device_found) {
+ /* Pass the DMI match's driver data down to the platform device */
+ platform_set_drvdata(&cros_ec_lpc_device, dmi_match->driver_data);
+
/* Register the device, and it'll get hooked up automatically */
ret = platform_device_register(&cros_ec_lpc_device);
if (ret) {
diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
index b6169d6f2467..41378c2ee6a0 100644
--- a/drivers/platform/chrome/cros_ec_proto_test.c
+++ b/drivers/platform/chrome/cros_ec_proto_test.c
@@ -1543,21 +1543,18 @@ static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret;
- struct {
- struct cros_ec_command msg;
- u8 data[0x100];
- } __packed buf;
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data, 0x100);
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
- buf.msg.version = 0;
- buf.msg.command = EC_CMD_HELLO;
- buf.msg.insize = 4;
- buf.msg.outsize = 2;
- buf.data[0] = 0x55;
- buf.data[1] = 0xaa;
+ buf->version = 0;
+ buf->command = EC_CMD_HELLO;
+ buf->insize = 4;
+ buf->outsize = 2;
+ buf->data[0] = 0x55;
+ buf->data[1] = 0xaa;
{
u8 *data;
@@ -1572,7 +1569,7 @@ static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
data[3] = 0x33;
}
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, buf);
KUNIT_EXPECT_EQ(test, ret, 4);
{
@@ -1590,10 +1587,10 @@ static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
KUNIT_EXPECT_EQ(test, data[0], 0x55);
KUNIT_EXPECT_EQ(test, data[1], 0xaa);
- KUNIT_EXPECT_EQ(test, buf.data[0], 0xaa);
- KUNIT_EXPECT_EQ(test, buf.data[1], 0x55);
- KUNIT_EXPECT_EQ(test, buf.data[2], 0xcc);
- KUNIT_EXPECT_EQ(test, buf.data[3], 0x33);
+ KUNIT_EXPECT_EQ(test, buf->data[0], 0xaa);
+ KUNIT_EXPECT_EQ(test, buf->data[1], 0x55);
+ KUNIT_EXPECT_EQ(test, buf->data[2], 0xcc);
+ KUNIT_EXPECT_EQ(test, buf->data[3], 0x33);
}
}
@@ -1603,26 +1600,23 @@ static void cros_ec_proto_test_cmd_xfer_excess_msg_insize(struct kunit *test)
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret;
- struct {
- struct cros_ec_command msg;
- u8 data[0x100];
- } __packed buf;
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data, 0x100);
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
- buf.msg.version = 0;
- buf.msg.command = EC_CMD_HELLO;
- buf.msg.insize = 0xee + 1;
- buf.msg.outsize = 2;
+ buf->version = 0;
+ buf->command = EC_CMD_HELLO;
+ buf->insize = 0xee + 1;
+ buf->outsize = 2;
{
mock = cros_kunit_ec_xfer_mock_add(test, 0xcc);
KUNIT_ASSERT_PTR_NE(test, mock, NULL);
}
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, buf);
KUNIT_EXPECT_EQ(test, ret, 0xcc);
{
@@ -1641,21 +1635,18 @@ static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru(stru
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
- struct {
- struct cros_ec_command msg;
- u8 data[0x100];
- } __packed buf;
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data, 0x100);
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
- buf.msg.version = 0;
- buf.msg.command = EC_CMD_HELLO;
- buf.msg.insize = 4;
- buf.msg.outsize = 0xff + 1;
+ buf->version = 0;
+ buf->command = EC_CMD_HELLO;
+ buf->insize = 4;
+ buf->outsize = 0xff + 1;
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, buf);
KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
}
@@ -1664,21 +1655,18 @@ static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru(struct
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
- struct {
- struct cros_ec_command msg;
- u8 data[0x100];
- } __packed buf;
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data, 0x100);
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
- buf.msg.version = 0;
- buf.msg.command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO;
- buf.msg.insize = 4;
- buf.msg.outsize = 0xdd + 1;
+ buf->version = 0;
+ buf->command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO;
+ buf->insize = 4;
+ buf->outsize = 0xdd + 1;
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer(ec_dev, buf);
KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
}
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
index 31fb8bdaad5a..50cdae67fa32 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -247,17 +248,23 @@ static SIMPLE_DEV_PM_OPS(cros_ec_sensorhub_pm_ops,
cros_ec_sensorhub_suspend,
cros_ec_sensorhub_resume);
+static const struct platform_device_id cros_ec_sensorhub_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_sensorhub_id);
+
static struct platform_driver cros_ec_sensorhub_driver = {
.driver = {
.name = DRV_NAME,
.pm = &cros_ec_sensorhub_pm_ops,
},
.probe = cros_ec_sensorhub_probe,
+ .id_table = cros_ec_sensorhub_id,
};
module_platform_driver(cros_ec_sensorhub_driver);
-MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
MODULE_DESCRIPTION("ChromeOS EC MEMS Sensor Hub Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index 93e67ab4af06..9c944146ee50 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kobject.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -347,16 +348,22 @@ static void cros_ec_sysfs_remove(struct platform_device *pd)
sysfs_remove_group(&ec_dev->class_dev.kobj, &cros_ec_attr_group);
}
+static const struct platform_device_id cros_ec_sysfs_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_sysfs_id);
+
static struct platform_driver cros_ec_sysfs_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_ec_sysfs_probe,
.remove_new = cros_ec_sysfs_remove,
+ .id_table = cros_ec_sysfs_id,
};
module_platform_driver(cros_ec_sysfs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Expose the ChromeOS EC through sysfs");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
index 8ea867c2a01a..62bc24f6dcc7 100644
--- a/drivers/platform/chrome/cros_ec_uart.c
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -263,12 +263,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
if (!ec_dev)
return -ENOMEM;
- ret = devm_serdev_device_open(dev, serdev);
- if (ret) {
- dev_err(dev, "Unable to open UART device");
- return ret;
- }
-
serdev_device_set_drvdata(serdev, ec_dev);
init_waitqueue_head(&ec_uart->response.wait_queue);
@@ -280,14 +274,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
return ret;
}
- ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
- if (ret < 0) {
- dev_err(dev, "Failed to set up host baud rate (%d)", ret);
- return ret;
- }
-
- serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
-
/* Initialize ec_dev for cros_ec */
ec_dev->phys_name = dev_name(dev);
ec_dev->dev = dev;
@@ -301,6 +287,20 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret) {
+ dev_err(dev, "Unable to open UART device");
+ return ret;
+ }
+
+ ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set up host baud rate (%d)", ret);
+ return ret;
+ }
+
+ serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
+
return cros_ec_register(ec_dev);
}
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 274ea0c64b33..787a19db4911 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -6,6 +6,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -133,16 +134,22 @@ static void cros_ec_vbc_remove(struct platform_device *pd)
&cros_ec_vbc_attr_group);
}
+static const struct platform_device_id cros_ec_vbc_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_vbc_id);
+
static struct platform_driver cros_ec_vbc_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_ec_vbc_probe,
.remove_new = cros_ec_vbc_remove,
+ .id_table = cros_ec_vbc_id,
};
module_platform_driver(cros_ec_vbc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Expose the vboot context nvram to userspace");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_hps_i2c.c b/drivers/platform/chrome/cros_hps_i2c.c
index b31313080332..dd14957ec39f 100644
--- a/drivers/platform/chrome/cros_hps_i2c.c
+++ b/drivers/platform/chrome/cros_hps_i2c.c
@@ -126,7 +126,7 @@ static int hps_resume(struct device *dev)
hps_set_power(hps, true);
return 0;
}
-static UNIVERSAL_DEV_PM_OPS(hps_pm_ops, hps_suspend, hps_resume, NULL);
+static DEFINE_RUNTIME_DEV_PM_OPS(hps_pm_ops, hps_suspend, hps_resume, NULL);
static const struct i2c_device_id hps_i2c_id[] = {
{ "cros-hps", 0 },
@@ -148,7 +148,7 @@ static struct i2c_driver hps_i2c_driver = {
.id_table = hps_i2c_id,
.driver = {
.name = "cros-hps",
- .pm = &hps_pm_ops,
+ .pm = pm_ptr(&hps_pm_ops),
.acpi_match_table = ACPI_PTR(hps_acpi_id),
},
};
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index 793fd3f1015d..b83e4f328620 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
@@ -247,17 +248,23 @@ static const struct of_device_id keyboard_led_of_match[] = {
MODULE_DEVICE_TABLE(of, keyboard_led_of_match);
#endif
+static const struct platform_device_id keyboard_led_id[] = {
+ { "cros-keyboard-leds", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, keyboard_led_id);
+
static struct platform_driver keyboard_led_driver = {
.driver = {
- .name = "chromeos-keyboard-leds",
+ .name = "cros-keyboard-leds",
.acpi_match_table = ACPI_PTR(keyboard_led_acpi_match),
.of_match_table = of_match_ptr(keyboard_led_of_match),
},
.probe = keyboard_led_probe,
+ .id_table = keyboard_led_id,
};
module_platform_driver(keyboard_led_driver);
MODULE_AUTHOR("Simon Que <sque@chromium.org>");
MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:chromeos-keyboard-leds");
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index f618757f8b32..930c2f47269f 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -7,6 +7,7 @@
#include <linux/ktime.h>
#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -249,6 +250,12 @@ static int __maybe_unused cros_usbpd_logger_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(cros_usbpd_logger_pm_ops, cros_usbpd_logger_suspend,
cros_usbpd_logger_resume);
+static const struct platform_device_id cros_usbpd_logger_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_usbpd_logger_id);
+
static struct platform_driver cros_usbpd_logger_driver = {
.driver = {
.name = DRV_NAME,
@@ -256,10 +263,10 @@ static struct platform_driver cros_usbpd_logger_driver = {
},
.probe = cros_usbpd_logger_probe,
.remove_new = cros_usbpd_logger_remove,
+ .id_table = cros_usbpd_logger_id,
};
module_platform_driver(cros_usbpd_logger_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Logging driver for ChromeOS EC USBPD Charger.");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
index aacad022f21d..c83f81d86483 100644
--- a/drivers/platform/chrome/cros_usbpd_notify.c
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_usbpd_notify.h>
@@ -218,12 +219,19 @@ static void cros_usbpd_notify_remove_plat(struct platform_device *pdev)
&pdnotify->nb);
}
+static const struct platform_device_id cros_usbpd_notify_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_usbpd_notify_id);
+
static struct platform_driver cros_usbpd_notify_plat_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_usbpd_notify_probe_plat,
.remove_new = cros_usbpd_notify_remove_plat,
+ .id_table = cros_usbpd_notify_id,
};
static int __init cros_usbpd_notify_init(void)
@@ -258,4 +266,3 @@ module_exit(cros_usbpd_notify_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS power delivery notifier device");
MODULE_AUTHOR("Jon Flatley <jflat@chromium.org>");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig
index 49e8530ca0ac..d1648fb099ac 100644
--- a/drivers/platform/chrome/wilco_ec/Kconfig
+++ b/drivers/platform/chrome/wilco_ec/Kconfig
@@ -3,6 +3,7 @@ config WILCO_EC
tristate "ChromeOS Wilco Embedded Controller"
depends on X86 || COMPILE_TEST
depends on ACPI && CROS_EC_LPC && LEDS_CLASS
+ depends on HAS_IOPORT
help
If you say Y here, you get support for talking to the ChromeOS
Wilco EC over an eSPI bus. This uses a simple byte-level protocol
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
index 9b59a1bed286..3e6b6cd81a9b 100644
--- a/drivers/platform/chrome/wilco_ec/core.c
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
@@ -150,6 +151,12 @@ static const struct acpi_device_id wilco_ec_acpi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, wilco_ec_acpi_device_ids);
+static const struct platform_device_id wilco_ec_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, wilco_ec_id);
+
static struct platform_driver wilco_ec_driver = {
.driver = {
.name = DRV_NAME,
@@ -157,6 +164,7 @@ static struct platform_driver wilco_ec_driver = {
},
.probe = wilco_ec_probe,
.remove_new = wilco_ec_remove,
+ .id_table = wilco_ec_id,
};
module_platform_driver(wilco_ec_driver);
@@ -165,4 +173,3 @@ MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
MODULE_AUTHOR("Duncan Laurie <dlaurie@chromium.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS Wilco Embedded Controller driver");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index 93c11f81ca45..983f2fa44ba5 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -10,6 +10,7 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
@@ -265,17 +266,23 @@ static void wilco_ec_debugfs_remove(struct platform_device *pdev)
debugfs_remove_recursive(debug_info->dir);
}
+static const struct platform_device_id wilco_ec_debugfs_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, wilco_ec_debugfs_id);
+
static struct platform_driver wilco_ec_debugfs_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = wilco_ec_debugfs_probe,
.remove_new = wilco_ec_debugfs_remove,
+ .id_table = wilco_ec_debugfs_id,
};
module_platform_driver(wilco_ec_debugfs_driver);
-MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Wilco EC debugfs driver");
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index 13291fb4214e..bd1fb53ba028 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -523,7 +523,6 @@ static struct acpi_driver event_driver = {
.notify = event_device_notify,
.remove = event_device_remove,
},
- .owner = THIS_MODULE,
};
static int __init event_module_init(void)
@@ -575,4 +574,3 @@ module_exit(event_module_exit);
MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
MODULE_DESCRIPTION("Wilco EC ACPI event driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
index 893c59dde32a..d44c43559621 100644
--- a/drivers/platform/chrome/wilco_ec/sysfs.c
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -192,7 +192,7 @@ static ssize_t usb_charge_show(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", rs.val);
+ return sysfs_emit(buf, "%d\n", rs.val);
}
static ssize_t usb_charge_store(struct device *dev,
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index b7c616f3d179..21d4cbbb009a 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -30,6 +30,7 @@
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
@@ -409,12 +410,19 @@ static void telem_device_remove(struct platform_device *pdev)
put_device(&dev_data->dev);
}
+static const struct platform_device_id telem_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, telem_id);
+
static struct platform_driver telem_driver = {
.probe = telem_device_probe,
.remove_new = telem_device_remove,
.driver = {
.name = DRV_NAME,
},
+ .id_table = telem_id,
};
static int __init telem_module_init(void)
@@ -466,4 +474,3 @@ module_exit(telem_module_exit);
MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
MODULE_DESCRIPTION("Wilco EC telemetry driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ee2e164f86b9..38c932df6446 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -598,6 +598,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_predator_v4,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PH18-71",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH18-71"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index b456370166b6..b4f49720c87f 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
}
},
+ {
+ .ident = "Framework Laptop 13 (Phoenix)",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+ DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 6b26e48ce8ad..7d6079b02589 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
auto-mode.o cnqf.o \
- tee-if.o spc.o
+ tee-if.o spc.o pmf-quirks.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index d0cf46e2fc8e..1157ec148880 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -343,7 +343,10 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
if (err)
return err;
- pdev->supported_func = output.supported_functions;
+ /* only set if not already set by a quirk */
+ if (!pdev->supported_func)
+ pdev->supported_func = output.supported_functions;
+
dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n",
output.supported_functions, output.notification_mask, output.version);
@@ -437,7 +440,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev)
status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, apmf_walk_resources, pmf_dev);
if (ACPI_FAILURE(status)) {
- dev_err(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
+ dev_dbg(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
return -EINVAL;
}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 5d4f80698a8b..64e6e34a2a9a 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -445,6 +445,7 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
+ amd_pmf_quirks_init(dev);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_dbgfs_register(dev);
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
new file mode 100644
index 000000000000..0b2eb0ae85fe
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/pmf-quirks.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver Quirks
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/dmi.h>
+
+#include "pmf.h"
+
+struct quirk_entry {
+ u32 supported_func;
+};
+
+static struct quirk_entry quirk_no_sps_bug = {
+ .supported_func = 0x4003,
+};
+
+static const struct dmi_system_id fwbug_list[] = {
+ {
+ .ident = "ROG Zephyrus G14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA403UV"),
+ },
+ .driver_data = &quirk_no_sps_bug,
+ },
+ {}
+};
+
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
+{
+ const struct dmi_system_id *dmi_id;
+ struct quirk_entry *quirks;
+
+ dmi_id = dmi_first_match(fwbug_list);
+ if (!dmi_id)
+ return;
+
+ quirks = dmi_id->driver_data;
+ if (quirks->supported_func) {
+ dev->supported_func = quirks->supported_func;
+ pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
+ dmi_id->ident);
+ }
+}
+
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 8c4df5753f40..eeedd0c0395a 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -720,4 +720,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+/* Quirk infrastructure */
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev);
+
#endif /* PMF_H */
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index bf03ea1b1274..78c42767295a 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1925,7 +1925,6 @@ MODULE_DEVICE_TABLE(acpi, asus_device_ids);
static struct acpi_driver asus_acpi_driver = {
.name = ASUS_LAPTOP_NAME,
.class = ASUS_LAPTOP_CLASS,
- .owner = THIS_MODULE,
.ids = asus_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 2edaea2492df..87462e7c6219 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -434,7 +434,6 @@ static const struct acpi_device_id cmpc_accel_device_ids_v4[] = {
};
static struct acpi_driver cmpc_accel_acpi_driver_v4 = {
- .owner = THIS_MODULE,
.name = "cmpc_accel_v4",
.class = "cmpc_accel_v4",
.ids = cmpc_accel_device_ids_v4,
@@ -660,7 +659,6 @@ static const struct acpi_device_id cmpc_accel_device_ids[] = {
};
static struct acpi_driver cmpc_accel_acpi_driver = {
- .owner = THIS_MODULE,
.name = "cmpc_accel",
.class = "cmpc_accel",
.ids = cmpc_accel_device_ids,
@@ -754,7 +752,6 @@ static const struct acpi_device_id cmpc_tablet_device_ids[] = {
};
static struct acpi_driver cmpc_tablet_acpi_driver = {
- .owner = THIS_MODULE,
.name = "cmpc_tablet",
.class = "cmpc_tablet",
.ids = cmpc_tablet_device_ids,
@@ -996,7 +993,6 @@ static const struct acpi_device_id cmpc_ipml_device_ids[] = {
};
static struct acpi_driver cmpc_ipml_acpi_driver = {
- .owner = THIS_MODULE,
.name = "cmpc",
.class = "cmpc",
.ids = cmpc_ipml_device_ids,
@@ -1064,7 +1060,6 @@ static const struct acpi_device_id cmpc_keys_device_ids[] = {
};
static struct acpi_driver cmpc_keys_acpi_driver = {
- .owner = THIS_MODULE,
.name = "cmpc_keys",
.class = "cmpc_keys",
.ids = cmpc_keys_device_ids,
diff --git a/drivers/platform/x86/dell/dell-rbtn.c b/drivers/platform/x86/dell/dell-rbtn.c
index c8fcb537fd65..a415c432d4c3 100644
--- a/drivers/platform/x86/dell/dell-rbtn.c
+++ b/drivers/platform/x86/dell/dell-rbtn.c
@@ -295,7 +295,6 @@ static struct acpi_driver rbtn_driver = {
.remove = rbtn_remove,
.notify = rbtn_notify,
},
- .owner = THIS_MODULE,
};
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index ff1b70269ccb..447364bed249 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1463,7 +1463,6 @@ MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
static struct acpi_driver eeepc_acpi_driver = {
.name = EEEPC_LAPTOP_NAME,
.class = EEEPC_ACPI_CLASS,
- .owner = THIS_MODULE,
.ids = eeepc_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 7457ca2b27a6..c7a827645864 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -49,6 +49,8 @@ static const struct acpi_device_id intel_hid_ids[] = {
{"INTC1076", 0},
{"INTC1077", 0},
{"INTC1078", 0},
+ {"INTC107B", 0},
+ {"INTC10CB", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
@@ -504,6 +506,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
struct platform_device *device = context;
struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
unsigned long long ev_index;
+ struct key_entry *ke;
int err;
/*
@@ -545,11 +548,15 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
if (event == 0xc0 || !priv->array)
return;
- if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
+ ke = sparse_keymap_entry_from_scancode(priv->array, event);
+ if (!ke) {
dev_info(&device->dev, "unknown event 0x%x\n", event);
return;
}
+ if (ke->type == KE_IGNORE)
+ return;
+
wakeup:
pm_wakeup_hard_event(&device->dev);
diff --git a/drivers/platform/x86/intel/rst.c b/drivers/platform/x86/intel/rst.c
index 35814a7707af..6bc9c4a603e0 100644
--- a/drivers/platform/x86/intel/rst.c
+++ b/drivers/platform/x86/intel/rst.c
@@ -125,7 +125,6 @@ static const struct acpi_device_id irst_ids[] = {
};
static struct acpi_driver irst_driver = {
- .owner = THIS_MODULE,
.name = "intel_rapid_start",
.class = "intel_rapid_start",
.ids = irst_ids,
diff --git a/drivers/platform/x86/intel/smartconnect.c b/drivers/platform/x86/intel/smartconnect.c
index 64c2dc93472f..cd25d0585324 100644
--- a/drivers/platform/x86/intel/smartconnect.c
+++ b/drivers/platform/x86/intel/smartconnect.c
@@ -32,7 +32,6 @@ static const struct acpi_device_id smartconnect_ids[] = {
MODULE_DEVICE_TABLE(acpi, smartconnect_ids);
static struct acpi_driver smartconnect_driver = {
- .owner = THIS_MODULE,
.name = "intel_smart_connect",
.class = "intel_smart_connect",
.ids = smartconnect_ids,
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 08df9494603c..1accdaaf282c 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -719,7 +719,9 @@ static struct miscdevice isst_if_char_driver = {
};
static const struct x86_cpu_id hpm_cpu_ids[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
{}
};
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index bd75d61ff8a6..ef730200a04b 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -29,7 +29,7 @@
#include "uncore-frequency-common.h"
#define UNCORE_MAJOR_VERSION 0
-#define UNCORE_MINOR_VERSION 1
+#define UNCORE_MINOR_VERSION 2
#define UNCORE_HEADER_INDEX 0
#define UNCORE_FABRIC_CLUSTER_OFFSET 8
@@ -329,7 +329,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
goto remove_clusters;
}
- if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MINOR_VERSION)
+ if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
TPMI_MINOR_VERSION(pd_info->ufs_header_ver));
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 084c355c86f5..79bb2c801daa 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -136,8 +136,6 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->switches_dev->id.bustype = BUS_HOST;
if (priv->has_switches) {
- detect_tablet_mode(&device->dev);
-
ret = input_register_device(priv->switches_dev);
if (ret)
return ret;
@@ -258,9 +256,6 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
{
- unsigned long long vgbs;
- acpi_status status;
-
/* See dual_accel_detect.h for more info */
if (dual_accel)
return false;
@@ -268,8 +263,7 @@ static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
if (!dmi_check_system(dmi_switches_allow_list))
return false;
- status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs);
- return ACPI_SUCCESS(status);
+ return acpi_has_method(handle, "VGBS");
}
static int intel_vbtn_probe(struct platform_device *device)
@@ -316,6 +310,9 @@ static int intel_vbtn_probe(struct platform_device *device)
if (ACPI_FAILURE(status))
dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
}
+ // Check switches after buttons since VBDL may have side effects.
+ if (has_switches)
+ detect_tablet_mode(&device->dev);
device_init_wakeup(&device->dev, true);
/*
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index ad3c39e9e9f5..d0fee5d375d7 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -736,7 +736,7 @@ static int acpi_add(struct acpi_device *device)
default:
year = 2019;
}
- pr_info("product: %s year: %d\n", product, year);
+ pr_info("product: %s year: %d\n", product ?: "unknown", year);
if (year >= 2019)
battery_limit_use_wmbb = 1;
@@ -790,7 +790,6 @@ static struct acpi_driver acpi_driver = {
.remove = acpi_remove,
.notify = acpi_notify,
},
- .owner = THIS_MODULE,
};
static int __init acpi_init(void)
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 40878e327afd..3e94fdd1ea52 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -3303,7 +3303,6 @@ static struct acpi_driver sony_nc_driver = {
.name = SONY_NC_DRIVER_NAME,
.class = SONY_NC_CLASS,
.ids = sony_nc_device_ids,
- .owner = THIS_MODULE,
.ops = {
.add = sony_nc_add,
.remove = sony_nc_remove,
@@ -4844,7 +4843,6 @@ static struct acpi_driver sony_pic_driver = {
.name = SONY_PIC_DRIVER_NAME,
.class = SONY_PIC_CLASS,
.ids = sony_pic_device_ids,
- .owner = THIS_MODULE,
.ops = {
.add = sony_pic_add,
.remove = sony_pic_remove,
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 291f14ef6702..d5b3e22cfa78 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -264,6 +264,7 @@ static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_KEY, 0xb32, { KEY_NEXTSONG } },
{ KE_KEY, 0xb33, { KEY_PLAYPAUSE } },
{ KE_KEY, 0xb5a, { KEY_MEDIA } },
+ { KE_IGNORE, 0x0e00, { KEY_RESERVED } }, /* Wake from sleep */
{ KE_IGNORE, 0x1430, { KEY_RESERVED } }, /* Wake from sleep */
{ KE_IGNORE, 0x1501, { KEY_RESERVED } }, /* Output changed */
{ KE_IGNORE, 0x1502, { KEY_RESERVED } }, /* HDMI plugged/unplugged */
@@ -3523,9 +3524,10 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
(dev->kbd_mode == SCI_KBD_MODE_ON) ?
LED_FULL : LED_OFF);
break;
+ case 0x8e: /* Power button pressed */
+ break;
case 0x85: /* Unknown */
case 0x8d: /* Unknown */
- case 0x8e: /* Unknown */
case 0x94: /* Unknown */
case 0x95: /* Unknown */
default:
@@ -3581,7 +3583,6 @@ static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
static struct acpi_driver toshiba_acpi_driver = {
.name = "Toshiba ACPI driver",
- .owner = THIS_MODULE,
.ids = toshiba_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index d8f81962a240..dad2c3e55904 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -59,7 +59,6 @@ static struct acpi_driver toshiba_bt_rfkill_driver = {
.remove = toshiba_bt_rfkill_remove,
.notify = toshiba_bt_rfkill_notify,
},
- .owner = THIS_MODULE,
.drv.pm = &toshiba_bt_pm,
};
diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c
index 8c9f76286b08..03dfddeee0c0 100644
--- a/drivers/platform/x86/toshiba_haps.c
+++ b/drivers/platform/x86/toshiba_haps.c
@@ -251,7 +251,6 @@ MODULE_DEVICE_TABLE(acpi, haps_device_ids);
static struct acpi_driver toshiba_haps_driver = {
.name = "Toshiba HAPS",
- .owner = THIS_MODULE,
.ids = haps_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
diff --git a/drivers/platform/x86/wireless-hotkey.c b/drivers/platform/x86/wireless-hotkey.c
index 4422863f47bb..e95cdbbfb708 100644
--- a/drivers/platform/x86/wireless-hotkey.c
+++ b/drivers/platform/x86/wireless-hotkey.c
@@ -110,7 +110,6 @@ static void wl_remove(struct acpi_device *device)
static struct acpi_driver wl_driver = {
.name = "wireless-hotkey",
- .owner = THIS_MODULE,
.ids = wl_ids,
.ops = {
.add = wl_add,
diff --git a/drivers/pnp/isapnp/Kconfig b/drivers/pnp/isapnp/Kconfig
index 8b5f2e461a80..8e5dec59e342 100644
--- a/drivers/pnp/isapnp/Kconfig
+++ b/drivers/pnp/isapnp/Kconfig
@@ -4,7 +4,7 @@
#
config ISAPNP
bool "ISA Plug and Play support"
- depends on ISA || COMPILE_TEST
+ depends on ISA || (HAS_IOPORT && COMPILE_TEST)
help
Say Y here if you would like support for ISA Plug and Play devices.
Some information is in <file:Documentation/userspace-api/isapnp.rst>.
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
index 1305cba61edd..aca123783efc 100644
--- a/drivers/power/supply/mt6360_charger.c
+++ b/drivers/power/supply/mt6360_charger.c
@@ -588,7 +588,7 @@ static const struct regulator_ops mt6360_chg_otg_ops = {
};
static const struct regulator_desc mt6360_otg_rdesc = {
- .of_match = "usb-otg-vbus",
+ .of_match = "usb-otg-vbus-regulator",
.name = "usb-otg-vbus",
.ops = &mt6360_chg_otg_ops,
.owner = THIS_MODULE,
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index c345a77f9f78..e4dbacd50a43 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -192,6 +192,7 @@ static const int rt9455_voreg_values[] = {
4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000
};
+#if IS_ENABLED(CONFIG_USB_PHY)
/*
* When the charger is in boost mode, REG02[7:2] represent boost output
* voltage.
@@ -207,6 +208,7 @@ static const int rt9455_boost_voltage_values[] = {
5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
};
+#endif
/* REG07[3:0] (VMREG) in uV */
static const int rt9455_vmreg_values[] = {
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index bc90126f1b5f..6b6f51b21550 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -43,13 +43,11 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
struct em_perf_domain *pd = em_cpu_get(dtpm_cpu->cpu);
struct em_perf_state *table;
- struct cpumask cpus;
unsigned long freq;
u64 power;
int i, nr_cpus;
- cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
- nr_cpus = cpumask_weight(&cpus);
+ nr_cpus = cpumask_weight_and(cpu_online_mask, to_cpumask(pd->cpus));
rcu_read_lock();
table = em_perf_state_from_pd(pd);
@@ -123,11 +121,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
struct em_perf_domain *em = em_cpu_get(dtpm_cpu->cpu);
struct em_perf_state *table;
- struct cpumask cpus;
int nr_cpus;
- cpumask_and(&cpus, cpu_online_mask, to_cpumask(em->cpus));
- nr_cpus = cpumask_weight(&cpus);
+ nr_cpus = cpumask_weight_and(cpu_online_mask, to_cpumask(em->cpus));
rcu_read_lock();
table = em_perf_state_from_pd(em);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index a28d54fd5222..aac0744011a3 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -5,27 +5,29 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitmap.h>
#include <linux/cleanup.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/intel_rapl.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/list.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/slab.h>
#include <linux/log2.h>
-#include <linux/bitmap.h>
-#include <linux/delay.h>
-#include <linux/sysfs.h>
-#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/nospec.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/powercap.h>
-#include <linux/suspend.h>
-#include <linux/intel_rapl.h>
#include <linux/processor.h>
-#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
-#include <asm/iosf_mbi.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/iosf_mbi.h>
/* bitmasks for RAPL MSRs, used by primitive access functions */
#define ENERGY_STATUS_MASK 0xffffffff
@@ -1263,6 +1265,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
@@ -1506,6 +1509,586 @@ static int rapl_detect_domains(struct rapl_package *rp)
return 0;
}
+#ifdef CONFIG_PERF_EVENTS
+
+/*
+ * Support for RAPL PMU
+ *
+ * Register a PMU if any of the registered RAPL Packages needs to expose its
+ * energy counters via the perf PMU.
+ *
+ * PMU Name:
+ * power
+ *
+ * Events:
+ * Name Event id RAPL Domain
+ * energy_cores 0x01 RAPL_DOMAIN_PP0
+ * energy_pkg 0x02 RAPL_DOMAIN_PACKAGE
+ * energy_ram 0x03 RAPL_DOMAIN_DRAM
+ * energy_gpu 0x04 RAPL_DOMAIN_PP1
+ * energy_psys 0x05 RAPL_DOMAIN_PLATFORM
+ *
+ * Unit:
+ * Joules
+ *
+ * Scale:
+ * 2.3283064365386962890625e-10
+ *	The same RAPL domain in different RAPL Packages may have different
+ *	energy units. Use 2.3283064365386962890625e-10 (2^-32) Joules as
+ *	the fixed unit for all energy counters, and convert each hardware
+ *	counter increment to N PMU event counter increments.
+ *
+ * This is fully compatible with the current MSR RAPL PMU. This means that
+ * userspace programs like turbostat can use the same code to handle the RAPL
+ * Perf PMU, no matter which RAPL interface driver (MSR/TPMI, etc.) is running
+ * underneath on the platform.
+ *
+ * Note that RAPL Packages can be probed/removed dynamically, and the events
+ * supported by each TPMI RAPL device can be different. Thus the RAPL PMU
+ * support is done on demand, which means
+ * 1. PMU is registered only if it is needed by a RAPL Package. PMU events for
+ * unsupported counters are not exposed.
+ * 2. PMU is unregistered and re-registered when a newly probed RAPL Package
+ *    supports counters that the current PMU does not expose.
+ * 3. PMU is unregistered when all registered RAPL Packages don't need PMU.
+ */
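For a sense of how a consumer uses the interface documented above, here is a
minimal user-space sketch (not part of the patch): it opens the energy-pkg
event by the config value from the table, reads the accumulated count, and
applies the fixed 2^-32 J scale. The sysfs path and event encoding follow the
comment above; everything else (error handling, choice of CPU) is illustrative.

	#include <linux/perf_event.h>
	#include <math.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd, type;
		FILE *f;

		/* PMU type id of the "power" PMU, exported via sysfs */
		f = fopen("/sys/bus/event_source/devices/power/type", "r");
		if (!f || fscanf(f, "%d", &type) != 1)
			return 1;
		fclose(f);

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = type;
		attr.config = 0x02;	/* energy-pkg, per the table above */

		/* RAPL events are per-package: cpu >= 0, pid == -1 */
		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
		if (fd < 0)
			return 1;

		sleep(1);
		if (read(fd, &count, sizeof(count)) != sizeof(count))
			return 1;

		/* One count is 2^-32 Joules (the advertised .scale) */
		printf("%.6f J\n", ldexp((double)count, -32));
		return 0;
	}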
+
+struct rapl_pmu {
+ struct pmu pmu; /* Perf PMU structure */
+ u64 timer_ms; /* Maximum expiration time to avoid counter overflow */
+ unsigned long domain_map; /* Events supported by current registered PMU */
+ bool registered; /* Whether the PMU has been registered or not */
+};
+
+static struct rapl_pmu rapl_pmu;
+
+/* PMU helpers */
+
+static int get_pmu_cpu(struct rapl_package *rp)
+{
+ int cpu;
+
+ if (!rp->has_pmu)
+ return nr_cpu_ids;
+
+ /* Only TPMI RAPL is supported for now */
+ if (rp->priv->type != RAPL_IF_TPMI)
+ return nr_cpu_ids;
+
+ /* TPMI RAPL uses any CPU in the package for PMU */
+ for_each_online_cpu(cpu)
+ if (topology_physical_package_id(cpu) == rp->id)
+ return cpu;
+
+ return nr_cpu_ids;
+}
+
+static bool is_rp_pmu_cpu(struct rapl_package *rp, int cpu)
+{
+ if (!rp->has_pmu)
+ return false;
+
+ /* Only TPMI RAPL is supported for now */
+ if (rp->priv->type != RAPL_IF_TPMI)
+ return false;
+
+ /* TPMI RAPL uses any CPU in the package for PMU */
+ return topology_physical_package_id(cpu) == rp->id;
+}
+
+static struct rapl_package_pmu_data *event_to_pmu_data(struct perf_event *event)
+{
+ struct rapl_package *rp = event->pmu_private;
+
+ return &rp->pmu_data;
+}
+
+/* PMU event callbacks */
+
+static u64 event_read_counter(struct perf_event *event)
+{
+ struct rapl_package *rp = event->pmu_private;
+ u64 val;
+ int ret;
+
+ /* Return 0 for unsupported events */
+ if (event->hw.idx < 0)
+ return 0;
+
+ ret = rapl_read_data_raw(&rp->domains[event->hw.idx], ENERGY_COUNTER, false, &val);
+
+ /* Return 0 for failed read */
+ if (ret)
+ return 0;
+
+ return val;
+}
+
+static void __rapl_pmu_event_start(struct perf_event *event)
+{
+ struct rapl_package_pmu_data *data = event_to_pmu_data(event);
+
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ event->hw.state = 0;
+
+ list_add_tail(&event->active_entry, &data->active_list);
+
+ local64_set(&event->hw.prev_count, event_read_counter(event));
+ if (++data->n_active == 1)
+ hrtimer_start(&data->hrtimer, data->timer_interval,
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static void rapl_pmu_event_start(struct perf_event *event, int mode)
+{
+ struct rapl_package_pmu_data *data = event_to_pmu_data(event);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ __rapl_pmu_event_start(event);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static u64 rapl_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct rapl_package_pmu_data *data = event_to_pmu_data(event);
+ u64 prev_raw_count, new_raw_count;
+ s64 delta, sdelta;
+
+	/*
+	 * Follow the generic code to drain hwc->prev_count.
+	 * The loop is not expected to iterate more than once.
+	 */
+ prev_raw_count = local64_read(&hwc->prev_count);
+ do {
+ new_raw_count = event_read_counter(event);
+ } while (!local64_try_cmpxchg(&hwc->prev_count,
+ &prev_raw_count, new_raw_count));
+
+ /*
+ * Now we have the new raw value and have updated the prev
+ * timestamp already. We can now calculate the elapsed delta
+ * (event-)time and add that to the generic event.
+ */
+ delta = new_raw_count - prev_raw_count;
+
+	/*
+	 * Scale delta to the smallest unit (2^-32 Joules).
+	 * Userspace must then scale back: count * 2^-32 to get Joules,
+	 * i.e. ldexp(count, -32).
+	 * Watts = Joules / time delta
+	 */
+ sdelta = delta * data->scale[event->hw.flags];
+
+ local64_add(sdelta, &event->count);
+
+ return new_raw_count;
+}
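To make the scaling concrete: assuming a typical hardware energy unit of
2^-14 J (about 61 uJ), data->scale[] is 2^18, so a raw counter delta of 1 adds
262144 to event->count — 262144 units of 2^-32 J, i.e. exactly 2^-14 J again
once user space applies the advertised scale.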
+
+static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+{
+ struct rapl_package_pmu_data *data = event_to_pmu_data(event);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+
+ /* Mark event as deactivated and stopped */
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ WARN_ON_ONCE(data->n_active <= 0);
+ if (--data->n_active == 0)
+ hrtimer_cancel(&data->hrtimer);
+
+ list_del(&event->active_entry);
+
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+ }
+
+ /* Check if update of sw counter is necessary */
+ if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ /*
+		 * Drain the remaining delta count out of an event
+ * that we are disabling:
+ */
+ rapl_event_update(event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int rapl_pmu_event_add(struct perf_event *event, int mode)
+{
+ struct rapl_package_pmu_data *data = event_to_pmu_data(event);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (mode & PERF_EF_START)
+ __rapl_pmu_event_start(event);
+
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static void rapl_pmu_event_del(struct perf_event *event, int flags)
+{
+ rapl_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+/* RAPL PMU event ids, same as shown in sysfs */
+enum perf_rapl_events {
+ PERF_RAPL_PP0 = 1, /* all cores */
+ PERF_RAPL_PKG, /* entire package */
+ PERF_RAPL_RAM, /* DRAM */
+ PERF_RAPL_PP1, /* gpu */
+ PERF_RAPL_PSYS, /* psys */
+ PERF_RAPL_MAX
+};
+#define RAPL_EVENT_MASK GENMASK(7, 0)
+
+static const int event_to_domain[PERF_RAPL_MAX] = {
+ [PERF_RAPL_PP0] = RAPL_DOMAIN_PP0,
+ [PERF_RAPL_PKG] = RAPL_DOMAIN_PACKAGE,
+ [PERF_RAPL_RAM] = RAPL_DOMAIN_DRAM,
+ [PERF_RAPL_PP1] = RAPL_DOMAIN_PP1,
+ [PERF_RAPL_PSYS] = RAPL_DOMAIN_PLATFORM,
+};
+
+static int rapl_pmu_event_init(struct perf_event *event)
+{
+ struct rapl_package *pos, *rp = NULL;
+ u64 cfg = event->attr.config & RAPL_EVENT_MASK;
+ int domain, idx;
+
+ /* Only look at RAPL events */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* Check for supported events only */
+ if (!cfg || cfg >= PERF_RAPL_MAX)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* Find out which Package the event belongs to */
+ list_for_each_entry(pos, &rapl_packages, plist) {
+ if (is_rp_pmu_cpu(pos, event->cpu)) {
+ rp = pos;
+ break;
+ }
+ }
+ if (!rp)
+ return -ENODEV;
+
+ /* Find out which RAPL Domain the event belongs to */
+ domain = event_to_domain[cfg];
+
+ event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
+ event->pmu_private = rp; /* Which package */
+ event->hw.flags = domain; /* Which domain */
+
+ event->hw.idx = -1;
+ /* Find out the index in rp->domains[] to get domain pointer */
+ for (idx = 0; idx < rp->nr_domains; idx++) {
+ if (rp->domains[idx].id == domain) {
+ event->hw.idx = idx;
+ break;
+ }
+ }
+
+ return 0;
+}
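For example, an event opened with config 0x02 on a CPU of package 1 resolves
rp to that package via is_rp_pmu_cpu(), maps to RAPL_DOMAIN_PACKAGE through
event_to_domain[], and sets hw.idx to the matching rp->domains[] slot; if the
package lacks that domain, hw.idx stays -1 and reads simply return 0.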
+
+static void rapl_pmu_event_read(struct perf_event *event)
+{
+ rapl_event_update(event);
+}
+
+static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
+{
+ struct rapl_package_pmu_data *data =
+ container_of(hrtimer, struct rapl_package_pmu_data, hrtimer);
+ struct perf_event *event;
+ unsigned long flags;
+
+ if (!data->n_active)
+ return HRTIMER_NORESTART;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+
+ list_for_each_entry(event, &data->active_list, active_entry)
+ rapl_event_update(event);
+
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+
+ hrtimer_forward_now(hrtimer, data->timer_interval);
+
+ return HRTIMER_RESTART;
+}
+
+/* PMU sysfs attributes */
+
+/*
+ * There are no default events, but we need to create the "events" group
+ * (with empty attrs) before updating it with detected events.
+ */
+static struct attribute *attrs_empty[] = {
+ NULL,
+};
+
+static struct attribute_group pmu_events_group = {
+ .name = "events",
+ .attrs = attrs_empty,
+};
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rapl_package *rp;
+ cpumask_var_t cpu_mask;
+ int cpu;
+ int ret;
+
+ if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpus_read_lock();
+
+ cpumask_clear(cpu_mask);
+
+ /* Choose a cpu for each RAPL Package */
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ cpu = get_pmu_cpu(rp);
+ if (cpu < nr_cpu_ids)
+ cpumask_set_cpu(cpu, cpu_mask);
+ }
+ cpus_read_unlock();
+
+ ret = cpumap_print_to_pagebuf(true, buf, cpu_mask);
+
+ free_cpumask_var(cpu_mask);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static struct attribute_group pmu_cpumask_group = {
+ .attrs = pmu_cpumask_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+static struct attribute *pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL
+};
+
+static struct attribute_group pmu_format_group = {
+ .name = "format",
+ .attrs = pmu_format_attr,
+};
+
+static const struct attribute_group *pmu_attr_groups[] = {
+ &pmu_events_group,
+ &pmu_cpumask_group,
+ &pmu_format_group,
+ NULL
+};
+
+#define RAPL_EVENT_ATTR_STR(_name, v, str) \
+static struct perf_pmu_events_attr event_attr_##v = { \
+ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
+ .event_str = str, \
+}
+
+RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
+RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02");
+RAPL_EVENT_ATTR_STR(energy-ram, rapl_ram, "event=0x03");
+RAPL_EVENT_ATTR_STR(energy-gpu, rapl_gpu, "event=0x04");
+RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05");
+
+RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_unit_cores, "Joules");
+RAPL_EVENT_ATTR_STR(energy-pkg.unit, rapl_unit_pkg, "Joules");
+RAPL_EVENT_ATTR_STR(energy-ram.unit, rapl_unit_ram, "Joules");
+RAPL_EVENT_ATTR_STR(energy-gpu.unit, rapl_unit_gpu, "Joules");
+RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_unit_psys, "Joules");
+
+RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_scale_cores, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_scale_pkg, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_scale_ram, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_scale_gpu, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_scale_psys, "2.3283064365386962890625e-10");
+
+#define RAPL_EVENT_GROUP(_name, domain) \
+static struct attribute *pmu_attr_##_name[] = { \
+ &event_attr_rapl_##_name.attr.attr, \
+ &event_attr_rapl_unit_##_name.attr.attr, \
+ &event_attr_rapl_scale_##_name.attr.attr, \
+ NULL \
+}; \
+static umode_t is_visible_##_name(struct kobject *kobj, struct attribute *attr, int event) \
+{ \
+ return rapl_pmu.domain_map & BIT(domain) ? attr->mode : 0; \
+} \
+static struct attribute_group pmu_group_##_name = { \
+ .name = "events", \
+ .attrs = pmu_attr_##_name, \
+ .is_visible = is_visible_##_name, \
+}
+
+RAPL_EVENT_GROUP(cores, RAPL_DOMAIN_PP0);
+RAPL_EVENT_GROUP(pkg, RAPL_DOMAIN_PACKAGE);
+RAPL_EVENT_GROUP(ram, RAPL_DOMAIN_DRAM);
+RAPL_EVENT_GROUP(gpu, RAPL_DOMAIN_PP1);
+RAPL_EVENT_GROUP(psys, RAPL_DOMAIN_PLATFORM);
+
+static const struct attribute_group *pmu_attr_update[] = {
+ &pmu_group_cores,
+ &pmu_group_pkg,
+ &pmu_group_ram,
+ &pmu_group_gpu,
+ &pmu_group_psys,
+ NULL
+};
+
+static int rapl_pmu_update(struct rapl_package *rp)
+{
+ int ret = 0;
+
+	/* Return if the PMU already covers all events supported by this RAPL Package */
+ if (rapl_pmu.registered && !(rp->domain_map & (~rapl_pmu.domain_map)))
+ goto end;
+
+ /* Unregister previous registered PMU */
+ if (rapl_pmu.registered)
+ perf_pmu_unregister(&rapl_pmu.pmu);
+
+ rapl_pmu.registered = false;
+ rapl_pmu.domain_map |= rp->domain_map;
+
+ memset(&rapl_pmu.pmu, 0, sizeof(struct pmu));
+ rapl_pmu.pmu.attr_groups = pmu_attr_groups;
+ rapl_pmu.pmu.attr_update = pmu_attr_update;
+ rapl_pmu.pmu.task_ctx_nr = perf_invalid_context;
+ rapl_pmu.pmu.event_init = rapl_pmu_event_init;
+ rapl_pmu.pmu.add = rapl_pmu_event_add;
+ rapl_pmu.pmu.del = rapl_pmu_event_del;
+ rapl_pmu.pmu.start = rapl_pmu_event_start;
+ rapl_pmu.pmu.stop = rapl_pmu_event_stop;
+ rapl_pmu.pmu.read = rapl_pmu_event_read;
+ rapl_pmu.pmu.module = THIS_MODULE;
+ rapl_pmu.pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT;
+ ret = perf_pmu_register(&rapl_pmu.pmu, "power", -1);
+ if (ret) {
+ pr_info("Failed to register PMU\n");
+ return ret;
+ }
+
+ rapl_pmu.registered = true;
+end:
+ rp->has_pmu = true;
+ return ret;
+}
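For example, if the registered PMU currently exposes {pkg, cores} and a newly
probed package also supports dram, rp->domain_map & ~rapl_pmu.domain_map is
non-zero, so the PMU is unregistered and re-registered exposing the union
{pkg, cores, dram}.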
+
+int rapl_package_add_pmu(struct rapl_package *rp)
+{
+ struct rapl_package_pmu_data *data = &rp->pmu_data;
+ int idx;
+
+ if (rp->has_pmu)
+ return -EEXIST;
+
+ guard(cpus_read_lock)();
+
+ for (idx = 0; idx < rp->nr_domains; idx++) {
+ struct rapl_domain *rd = &rp->domains[idx];
+ int domain = rd->id;
+ u64 val;
+
+ if (!test_bit(domain, &rp->domain_map))
+ continue;
+
+		/*
+		 * The RAPL PMU granularity is 2^-32 Joules.
+		 * data->scale[] holds the number of 2^-32 Joule units per
+		 * ENERGY_COUNTER increment.
+		 */
+ val = rd->energy_unit * (1ULL << 32);
+ do_div(val, ENERGY_UNIT_SCALE * 1000000);
+ data->scale[domain] = val;
+
+ if (!rapl_pmu.timer_ms) {
+ struct rapl_primitive_info *rpi = get_rpi(rp, ENERGY_COUNTER);
+
+			/*
+			 * Calculate the timer rate:
+			 * Use a reference of 200 W when scaling the timeout to
+			 * avoid counter overflows.
+			 *
+			 * max_count = (rpi->mask >> rpi->shift) + 1
+			 * max_energy_pj = max_count * rd->energy_unit
+			 * max_time_sec = (max_energy_pj / 1000000000) / 200 W
+			 *
+			 * rapl_pmu.timer_ms = max_time_sec * 1000 / 2
+			 */
+ val = (rpi->mask >> rpi->shift) + 1;
+ val *= rd->energy_unit;
+ do_div(val, 1000000 * 200 * 2);
+ rapl_pmu.timer_ms = val;
+
+ pr_debug("%llu ms overflow timer\n", rapl_pmu.timer_ms);
+ }
+
+ pr_debug("Domain %s: hw unit %lld * 2^-32 Joules\n", rd->name, data->scale[domain]);
+ }
+
+ /* Initialize per package PMU data */
+ raw_spin_lock_init(&data->lock);
+ INIT_LIST_HEAD(&data->active_list);
+ data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms);
+ hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ data->hrtimer.function = rapl_hrtimer_handle;
+
+ return rapl_pmu_update(rp);
+}
+EXPORT_SYMBOL_GPL(rapl_package_add_pmu);
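Plugging in typical numbers (an assumed 32-bit counter with a 2^-14 J, about
61 uJ, energy unit): the counter wraps after 2^32 * 61 uJ ~ 262 kJ, which the
200 W reference burns through in ~1311 s; halving that gives
timer_ms ~ 655360, i.e. the overflow hrtimer fires roughly every 11 minutes.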
+
+void rapl_package_remove_pmu(struct rapl_package *rp)
+{
+ struct rapl_package *pos;
+
+ if (!rp->has_pmu)
+ return;
+
+ guard(cpus_read_lock)();
+
+ list_for_each_entry(pos, &rapl_packages, plist) {
+ /* PMU is still needed */
+ if (pos->has_pmu && pos != rp)
+ return;
+ }
+
+ perf_pmu_unregister(&rapl_pmu.pmu);
+ memset(&rapl_pmu, 0, sizeof(struct rapl_pmu));
+}
+EXPORT_SYMBOL_GPL(rapl_package_remove_pmu);
+#endif
+
/* called from CPU hotplug notifier, hotplug lock held */
void rapl_remove_package_cpuslocked(struct rapl_package *rp)
{
diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c
index f6b7f085977c..947544e4d229 100644
--- a/drivers/powercap/intel_rapl_tpmi.c
+++ b/drivers/powercap/intel_rapl_tpmi.c
@@ -302,6 +302,8 @@ static int intel_rapl_tpmi_probe(struct auxiliary_device *auxdev,
goto err;
}
+ rapl_package_add_pmu(trp->rp);
+
auxiliary_set_drvdata(auxdev, trp);
return 0;
@@ -314,6 +316,7 @@ static void intel_rapl_tpmi_remove(struct auxiliary_device *auxdev)
{
struct tpmi_rapl_package *trp = auxiliary_get_drvdata(auxdev);
+ rapl_package_remove_pmu(trp->rp);
rapl_remove_package(trp->rp);
trp_release(trp);
}
diff --git a/drivers/ptp/ptp_vmw.c b/drivers/ptp/ptp_vmw.c
index 27c5547aa8a9..7ec90359428a 100644
--- a/drivers/ptp/ptp_vmw.c
+++ b/drivers/ptp/ptp_vmw.c
@@ -120,7 +120,6 @@ static struct acpi_driver ptp_vmw_acpi_driver = {
.add = ptp_vmw_acpi_add,
.remove = ptp_vmw_acpi_remove
},
- .owner = THIS_MODULE
};
static int __init ptp_vmw_init(void)
diff --git a/drivers/pwm/pwm-dwc-core.c b/drivers/pwm/pwm-dwc-core.c
index 043736972cb9..c8425493b95d 100644
--- a/drivers/pwm/pwm-dwc-core.c
+++ b/drivers/pwm/pwm-dwc-core.c
@@ -172,7 +172,6 @@ struct pwm_chip *dwc_pwm_alloc(struct device *dev)
dwc->clk_ns = 10;
chip->ops = &dwc_pwm_ops;
- dev_set_drvdata(dev, chip);
return chip;
}
EXPORT_SYMBOL_GPL(dwc_pwm_alloc);
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index 676eaf8d7a53..fb3eadf6fbc4 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -31,26 +31,34 @@ static const struct dwc_pwm_info ehl_pwm_info = {
.size = 0x1000,
};
-static int dwc_pwm_init_one(struct device *dev, void __iomem *base, unsigned int offset)
+static int dwc_pwm_init_one(struct device *dev, struct dwc_pwm_drvdata *ddata, unsigned int idx)
{
struct pwm_chip *chip;
struct dwc_pwm *dwc;
+ int ret;
chip = dwc_pwm_alloc(dev);
if (IS_ERR(chip))
return PTR_ERR(chip);
dwc = to_dwc_pwm(chip);
- dwc->base = base + offset;
+ dwc->base = ddata->io_base + (ddata->info->size * idx);
- return devm_pwmchip_add(dev, chip);
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret)
+ return ret;
+
+ ddata->chips[idx] = chip;
+ return 0;
}
static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
const struct dwc_pwm_info *info;
struct device *dev = &pci->dev;
- int i, ret;
+ struct dwc_pwm_drvdata *ddata;
+ unsigned int idx;
+ int ret;
ret = pcim_enable_device(pci);
if (ret)
@@ -63,17 +71,25 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
return dev_err_probe(dev, ret, "Failed to iomap PCI BAR\n");
info = (const struct dwc_pwm_info *)id->driver_data;
-
- for (i = 0; i < info->nr; i++) {
- /*
- * No need to check for pcim_iomap_table() failure,
- * pcim_iomap_regions() already does it for us.
- */
- ret = dwc_pwm_init_one(dev, pcim_iomap_table(pci)[0], i * info->size);
+ ddata = devm_kzalloc(dev, struct_size(ddata, chips, info->nr), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ /*
+ * No need to check for pcim_iomap_table() failure,
+ * pcim_iomap_regions() already does it for us.
+ */
+ ddata->io_base = pcim_iomap_table(pci)[0];
+ ddata->info = info;
+
+ for (idx = 0; idx < ddata->info->nr; idx++) {
+ ret = dwc_pwm_init_one(dev, ddata, idx);
if (ret)
return ret;
}
+ dev_set_drvdata(dev, ddata);
+
pm_runtime_put(dev);
pm_runtime_allow(dev);
@@ -88,19 +104,24 @@ static void dwc_pwm_remove(struct pci_dev *pci)
static int dwc_pwm_suspend(struct device *dev)
{
- struct pwm_chip *chip = dev_get_drvdata(dev);
- struct dwc_pwm *dwc = to_dwc_pwm(chip);
- int i;
-
- for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
- if (chip->pwms[i].state.enabled) {
- dev_err(dev, "PWM %u in use by consumer (%s)\n",
- i, chip->pwms[i].label);
- return -EBUSY;
+ struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
+ unsigned int idx;
+
+ for (idx = 0; idx < ddata->info->nr; idx++) {
+ struct pwm_chip *chip = ddata->chips[idx];
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ unsigned int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ if (chip->pwms[i].state.enabled) {
+ dev_err(dev, "PWM %u in use by consumer (%s)\n",
+ i, chip->pwms[i].label);
+ return -EBUSY;
+ }
+ dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
+ dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
+ dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
}
- dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
- dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
- dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
}
return 0;
@@ -108,14 +129,19 @@ static int dwc_pwm_suspend(struct device *dev)
static int dwc_pwm_resume(struct device *dev)
{
- struct pwm_chip *chip = dev_get_drvdata(dev);
- struct dwc_pwm *dwc = to_dwc_pwm(chip);
- int i;
-
- for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
- dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
- dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
- dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+ struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
+ unsigned int idx;
+
+ for (idx = 0; idx < ddata->info->nr; idx++) {
+ struct pwm_chip *chip = ddata->chips[idx];
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ unsigned int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+ }
}
return 0;
diff --git a/drivers/pwm/pwm-dwc.h b/drivers/pwm/pwm-dwc.h
index a8b074841ae8..c6e2df5a6122 100644
--- a/drivers/pwm/pwm-dwc.h
+++ b/drivers/pwm/pwm-dwc.h
@@ -38,6 +38,12 @@ struct dwc_pwm_info {
unsigned int size;
};
+struct dwc_pwm_drvdata {
+ const struct dwc_pwm_info *info;
+ void __iomem *io_base;
+ struct pwm_chip *chips[];
+};
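With this layout, dwc_pwm_init_one() places chip idx at
ddata->io_base + idx * info->size — e.g. for ehl_pwm_info (.size = 0x1000),
chip 1 sits at BAR0 + 0x1000 — and records each pwm_chip pointer in the
trailing chips[] flexible array, so suspend/resume can walk every chip instead
of the single chip previously stored as drvdata.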
+
struct dwc_pwm_ctx {
u32 cnt;
u32 cnt2;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 30f8e46dacdd..5794f4e9dd52 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1911,19 +1911,24 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
}
}
- if (err != -EEXIST)
+ if (err != -EEXIST) {
regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
- if (IS_ERR(regulator->debugfs))
- rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ if (IS_ERR(regulator->debugfs)) {
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ regulator->debugfs = NULL;
+ }
+ }
- debugfs_create_u32("uA_load", 0444, regulator->debugfs,
- &regulator->uA_load);
- debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].min_uV);
- debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].max_uV);
- debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
- regulator, &constraint_flags_fops);
+ if (regulator->debugfs) {
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
+ debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
+ regulator, &constraint_flags_fops);
+ }
/*
* Check now if the regulator is an always on regulator - if
diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
index fe7ae0f3f46a..5ab1a0befe12 100644
--- a/drivers/regulator/irq_helpers.c
+++ b/drivers/regulator/irq_helpers.c
@@ -352,6 +352,9 @@ void *regulator_irq_helper(struct device *dev,
h->irq = irq;
h->desc = *d;
+ h->desc.name = devm_kstrdup(dev, d->name, GFP_KERNEL);
+ if (!h->desc.name)
+ return ERR_PTR(-ENOMEM);
ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
rdev_amount);
diff --git a/drivers/regulator/mt6360-regulator.c b/drivers/regulator/mt6360-regulator.c
index ad6587a378d0..24cc9fc94e90 100644
--- a/drivers/regulator/mt6360-regulator.c
+++ b/drivers/regulator/mt6360-regulator.c
@@ -319,15 +319,15 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
}
}
-#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg, vmask, \
- mreg, mmask, streg, stmask, vranges, \
- vcnts, offon_delay, irq_tbls) \
+#define MT6360_REGULATOR_DESC(match, _name, _sname, ereg, emask, vreg, \
+ vmask, mreg, mmask, streg, stmask, \
+ vranges, vcnts, offon_delay, irq_tbls) \
{ \
.desc = { \
.name = #_name, \
.supply_name = #_sname, \
.id = MT6360_REGULATOR_##_name, \
- .of_match = of_match_ptr(#_name), \
+ .of_match = of_match_ptr(match), \
.regulators_node = of_match_ptr("regulator"), \
.of_map_mode = mt6360_regulator_of_map_mode, \
.owner = THIS_MODULE, \
@@ -351,21 +351,29 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
}
static const struct mt6360_regulator_desc mt6360_regulator_descs[] = {
- MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
+ MT6360_REGULATOR_DESC("buck1", BUCK1, BUCK1_VIN,
+ 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
buck_vout_ranges, 256, 0, buck1_irq_tbls),
- MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
+ MT6360_REGULATOR_DESC("buck2", BUCK2, BUCK2_VIN,
+ 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
buck_vout_ranges, 256, 0, buck2_irq_tbls),
- MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
+ MT6360_REGULATOR_DESC("ldo6", LDO6, LDO_VIN3,
+ 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
ldo_vout_ranges1, 256, 0, ldo6_irq_tbls),
- MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
+ MT6360_REGULATOR_DESC("ldo7", LDO7, LDO_VIN3,
+ 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
ldo_vout_ranges1, 256, 0, ldo7_irq_tbls),
- MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
+ MT6360_REGULATOR_DESC("ldo1", LDO1, LDO_VIN1,
+ 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
ldo_vout_ranges2, 256, 0, ldo1_irq_tbls),
- MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
+ MT6360_REGULATOR_DESC("ldo2", LDO2, LDO_VIN1,
+ 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
ldo_vout_ranges2, 256, 0, ldo2_irq_tbls),
- MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
+ MT6360_REGULATOR_DESC("ldo3", LDO3, LDO_VIN1,
+ 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
ldo_vout_ranges2, 256, 100, ldo3_irq_tbls),
- MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
+ MT6360_REGULATOR_DESC("ldo5", LDO5, LDO_VIN2,
+ 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
ldo_vout_ranges3, 128, 100, ldo5_irq_tbls),
};
diff --git a/drivers/regulator/qcom-refgen-regulator.c b/drivers/regulator/qcom-refgen-regulator.c
index 656fe330d38f..063e12c08e75 100644
--- a/drivers/regulator/qcom-refgen-regulator.c
+++ b/drivers/regulator/qcom-refgen-regulator.c
@@ -140,6 +140,7 @@ static const struct of_device_id qcom_refgen_match_table[] = {
{ .compatible = "qcom,sm8250-refgen-regulator", .data = &sm8250_refgen_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_refgen_match_table);
static struct platform_driver qcom_refgen_driver = {
.probe = qcom_refgen_probe,
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
index 63d4037e6aab..b90e53d922d6 100644
--- a/drivers/regulator/rtq2208-regulator.c
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -53,7 +53,7 @@
/* Value */
#define RTQ2208_RAMP_VALUE_MIN_uV 500
-#define RTQ2208_RAMP_VALUE_MAX_uV 64000
+#define RTQ2208_RAMP_VALUE_MAX_uV 16000
#define RTQ2208_BUCK_MASK(uv_irq, ov_irq) (1 << ((uv_irq) % 8) | 1 << ((ov_irq) % 8))
@@ -147,12 +147,11 @@ static int rtq2208_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
	 * Because the relation between selection and value is as follows
	 *
	 * selection: value
- * 000: 64mv
- * 001: 32mv
+ * 010: 16mv
* ...
* 111: 0.5mv
*
- * For example, if I would like to select 64mv, the fls(ramp_delay) - 1 will be 0b111,
+	 * For example, to select 16mv, fls(ramp_delay) - 1 will be 0b010,
* and I need to use 0b111 - sel to do the shifting
*/
diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
index 086da36abc0b..4955616517ce 100644
--- a/drivers/regulator/vqmmc-ipq4019-regulator.c
+++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
@@ -84,6 +84,7 @@ static const struct of_device_id regulator_ipq4019_of_match[] = {
{ .compatible = "qcom,vqmmc-ipq4019-regulator", },
{},
};
+MODULE_DEVICE_TABLE(of, regulator_ipq4019_of_match);
static struct platform_driver ipq4019_regulator_driver = {
.probe = ipq4019_regulator_probe,
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 85b27c42cf65..7112f5932609 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -103,9 +103,9 @@ config RESET_INTEL_GW
config RESET_K210
bool "Reset controller driver for Canaan Kendryte K210 SoC"
- depends on (SOC_CANAAN || COMPILE_TEST) && OF
+ depends on (SOC_CANAAN_K210 || COMPILE_TEST) && OF
select MFD_SYSCON
- default SOC_CANAAN
+ default SOC_CANAAN_K210
help
Support for the Canaan Kendryte K210 RISC-V SoC reset controller.
Say Y if you want to control reset signals provided by this
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index b0f6b3201636..81d6744e1861 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -32,7 +32,7 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_PCI) += sclp_pci.o
-obj-$(subst m,y,$(CONFIG_ZCRYPT)) += sclp_ap.o
+obj-$(subst m,y,$(CONFIG_AP)) += sclp_ap.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 37173cb0f5f5..c57694be9bd3 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -162,7 +162,8 @@ struct raw3270_request *raw3270_request_alloc(size_t size)
/*
* Setup ccw.
*/
- rq->ccw.cda = virt_to_dma32(rq->buffer);
+ if (rq->buffer)
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
return rq;
@@ -188,7 +189,8 @@ int raw3270_request_reset(struct raw3270_request *rq)
return -EBUSY;
rq->ccw.cmd_code = 0;
rq->ccw.count = 0;
- rq->ccw.cda = virt_to_dma32(rq->buffer);
+ if (rq->buffer)
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
rq->rescnt = 0;
rq->rc = 0;
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 675d7ed82356..a07bbecba61c 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -127,10 +127,9 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
/*
* Channel measurement related functions
*/
-static ssize_t chp_measurement_chars_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t measurement_chars_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
struct device *device;
@@ -143,87 +142,79 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
sizeof(chp->cmg_chars));
}
+static BIN_ATTR_ADMIN_RO(measurement_chars, sizeof(struct cmg_chars));
-static const struct bin_attribute chp_measurement_chars_attr = {
- .attr = {
- .name = "measurement_chars",
- .mode = S_IRUSR,
- },
- .size = sizeof(struct cmg_chars),
- .read = chp_measurement_chars_read,
-};
-
-static void chp_measurement_copy_block(struct cmg_entry *buf,
- struct channel_subsystem *css,
- struct chp_id chpid)
-{
- void *area;
- struct cmg_entry *entry, reference_buf;
- int idx;
-
- if (chpid.id < 128) {
- area = css->cub_addr1;
- idx = chpid.id;
- } else {
- area = css->cub_addr2;
- idx = chpid.id - 128;
- }
- entry = area + (idx * sizeof(struct cmg_entry));
- do {
- memcpy(buf, entry, sizeof(*entry));
- memcpy(&reference_buf, entry, sizeof(*entry));
- } while (reference_buf.values[0] != buf->values[0]);
-}
-
-static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t chp_measurement_copy_block(void *buf, loff_t off, size_t count,
+ struct kobject *kobj, bool extended)
{
struct channel_path *chp;
struct channel_subsystem *css;
struct device *device;
unsigned int size;
+ void *area, *entry;
+ int id, idx;
device = kobj_to_dev(kobj);
chp = to_channelpath(device);
css = to_css(chp->dev.parent);
+ id = chp->chpid.id;
- size = sizeof(struct cmg_entry);
+ if (extended) {
+ /* Check if extended measurement data is available. */
+ if (!chp->extended)
+ return 0;
+
+ size = sizeof(struct cmg_ext_entry);
+ area = css->ecub[id / CSS_ECUES_PER_PAGE];
+ idx = id % CSS_ECUES_PER_PAGE;
+ } else {
+ size = sizeof(struct cmg_entry);
+ area = css->cub[id / CSS_CUES_PER_PAGE];
+ idx = id % CSS_CUES_PER_PAGE;
+ }
+ entry = area + (idx * size);
/* Only allow single reads. */
if (off || count < size)
return 0;
- chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
- count = size;
- return count;
+
+ memcpy(buf, entry, size);
+
+ return size;
}
-static const struct bin_attribute chp_measurement_attr = {
- .attr = {
- .name = "measurement",
- .mode = S_IRUSR,
- },
- .size = sizeof(struct cmg_entry),
- .read = chp_measurement_read,
+static ssize_t measurement_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ return chp_measurement_copy_block(buf, off, count, kobj, false);
+}
+static BIN_ATTR_ADMIN_RO(measurement, sizeof(struct cmg_entry));
+
+static ssize_t ext_measurement_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ return chp_measurement_copy_block(buf, off, count, kobj, true);
+}
+static BIN_ATTR_ADMIN_RO(ext_measurement, sizeof(struct cmg_ext_entry));
+
+static struct bin_attribute *measurement_attrs[] = {
+ &bin_attr_measurement_chars,
+ &bin_attr_measurement,
+ &bin_attr_ext_measurement,
+ NULL,
};
+BIN_ATTRIBUTE_GROUPS(measurement);
void chp_remove_cmg_attr(struct channel_path *chp)
{
- device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
- device_remove_bin_file(&chp->dev, &chp_measurement_attr);
+ device_remove_groups(&chp->dev, measurement_groups);
}
int chp_add_cmg_attr(struct channel_path *chp)
{
- int ret;
-
- ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
- if (ret)
- return ret;
- ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
- if (ret)
- device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
- return ret;
+ return device_add_groups(&chp->dev, measurement_groups);
}
/*
@@ -401,6 +392,35 @@ static ssize_t chp_esc_show(struct device *dev,
}
static DEVICE_ATTR(esc, 0444, chp_esc_show, NULL);
+static char apply_max_suffix(unsigned long *value, unsigned long base)
+{
+ static char suffixes[] = { 0, 'K', 'M', 'G', 'T' };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(suffixes) - 1; i++) {
+ if (*value < base || *value % base != 0)
+ break;
+ *value /= base;
+ }
+
+ return suffixes[i];
+}
+
+static ssize_t speed_bps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ unsigned long speed = chp->speed;
+ char suffix;
+
+ suffix = apply_max_suffix(&speed, 1000);
+
+ return suffix ? sysfs_emit(buf, "%lu%c\n", speed, suffix) :
+ sysfs_emit(buf, "%lu\n", speed);
+}
+
+static DEVICE_ATTR_RO(speed_bps);
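For instance, a speed of 25000000000 bps divides exactly by 1000 three times
and is emitted as "25G", while 2500000000 stops after two divisions (2500 is
not a multiple of 1000) and is shown as "2500M"; values below 1000 get no
suffix at all.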
+
static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -432,6 +452,7 @@ static struct attribute *chp_attrs[] = {
&dev_attr_chid.attr,
&dev_attr_chid_external.attr,
&dev_attr_esc.attr,
+ &dev_attr_speed_bps.attr,
NULL,
};
static struct attribute_group chp_attr_group = {
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 7ee9eba0abcb..a15324a43aa3 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -51,6 +51,8 @@ struct channel_path {
/* Channel-measurement related stuff: */
int cmg;
int shared;
+ int extended;
+ unsigned long speed;
struct cmg_chars cmg_chars;
};
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 44ea76f9e1de..dcc1e1c34ca2 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -24,7 +24,6 @@
#include <asm/crw.h>
#include <asm/isc.h>
#include <asm/ebcdic.h>
-#include <asm/ap.h>
#include "css.h"
#include "cio.h"
@@ -40,6 +39,20 @@ static DEFINE_SPINLOCK(chsc_page_lock);
#define SEI_VF_FLA 0xc0 /* VF flag for Full Link Address */
#define SEI_RS_CHPID 0x4 /* 4 in RS field indicates CHPID */
+static BLOCKING_NOTIFIER_HEAD(chsc_notifiers);
+
+int chsc_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&chsc_notifiers, nb);
+}
+EXPORT_SYMBOL(chsc_notifier_register);
+
+int chsc_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&chsc_notifiers, nb);
+}
+EXPORT_SYMBOL(chsc_notifier_unregister);
+
/**
* chsc_error_from_response() - convert a chsc response to an error
* @response: chsc response code
@@ -581,7 +594,8 @@ static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
if (sei_area->rs != 5)
return;
- ap_bus_cfg_chg();
+ blocking_notifier_call_chain(&chsc_notifiers,
+ CHSC_NOTIFY_AP_CFG, NULL);
}
static void chsc_process_sei_fces_event(struct chsc_sei_nt0_area *sei_area)
@@ -857,22 +871,22 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
struct {
struct chsc_header request;
u32 operation_code : 2;
- u32 : 30;
+ u32 : 1;
+ u32 e : 1;
+ u32 : 28;
u32 key : 4;
u32 : 28;
- u32 zeroes1;
- dma32_t cub_addr1;
- u32 zeroes2;
- dma32_t cub_addr2;
- u32 reserved[13];
+ dma64_t cub[CSS_NUM_CUB_PAGES];
+ dma64_t ecub[CSS_NUM_ECUB_PAGES];
+ u32 reserved[5];
struct chsc_header response;
u32 status : 8;
u32 : 4;
u32 fmt : 4;
u32 : 16;
- } *secm_area;
+ } __packed *secm_area;
unsigned long flags;
- int ret, ccode;
+ int ret, ccode, i;
spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
@@ -881,8 +895,12 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
secm_area->request.code = 0x0016;
secm_area->key = PAGE_DEFAULT_KEY >> 4;
- secm_area->cub_addr1 = virt_to_dma32(css->cub_addr1);
- secm_area->cub_addr2 = virt_to_dma32(css->cub_addr2);
+ secm_area->e = 1;
+
+ for (i = 0; i < CSS_NUM_CUB_PAGES; i++)
+ secm_area->cub[i] = (__force dma64_t)virt_to_dma32(css->cub[i]);
+ for (i = 0; i < CSS_NUM_ECUB_PAGES; i++)
+ secm_area->ecub[i] = virt_to_dma64(css->ecub[i]);
secm_area->operation_code = enable ? 0 : 1;
@@ -908,19 +926,47 @@ out:
return ret;
}
+static int cub_alloc(struct channel_subsystem *css)
+{
+ int i;
+
+ for (i = 0; i < CSS_NUM_CUB_PAGES; i++) {
+ css->cub[i] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!css->cub[i])
+ return -ENOMEM;
+ }
+ for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) {
+ css->ecub[i] = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!css->ecub[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void cub_free(struct channel_subsystem *css)
+{
+ int i;
+
+ for (i = 0; i < CSS_NUM_CUB_PAGES; i++) {
+ free_page((unsigned long)css->cub[i]);
+ css->cub[i] = NULL;
+ }
+ for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) {
+ free_page((unsigned long)css->ecub[i]);
+ css->ecub[i] = NULL;
+ }
+}
+
int
chsc_secm(struct channel_subsystem *css, int enable)
{
int ret;
if (enable && !css->cm_enabled) {
- css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!css->cub_addr1 || !css->cub_addr2) {
- free_page((unsigned long)css->cub_addr1);
- free_page((unsigned long)css->cub_addr2);
- return -ENOMEM;
- }
+ ret = cub_alloc(css);
+ if (ret)
+ goto out;
}
ret = __chsc_do_secm(css, enable);
if (!ret) {
@@ -934,10 +980,11 @@ chsc_secm(struct channel_subsystem *css, int enable)
} else
chsc_remove_cmg_attr(css);
}
- if (!css->cm_enabled) {
- free_page((unsigned long)css->cub_addr1);
- free_page((unsigned long)css->cub_addr2);
- }
+
+out:
+ if (!css->cm_enabled)
+ cub_free(css);
+
return ret;
}
@@ -1019,6 +1066,18 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
}
}
+static unsigned long scmc_get_speed(u32 s, u32 p)
+{
+ unsigned long speed = s;
+
+ if (!p)
+ p = 8;
+ while (p--)
+ speed *= 10;
+
+ return speed;
+}
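So a reported cmgs of 25 with cmgp 9 yields 25 * 10^9 = 25 Gbps (rendered as
"25G" by speed_bps_show() above), and a zero cmgp is treated as an exponent of
8, i.e. s * 10^8 bps.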
+
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
unsigned long flags;
@@ -1035,18 +1094,23 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
u32 zeroes2;
u32 not_valid : 1;
u32 shared : 1;
- u32 : 22;
+ u32 extended : 1;
+ u32 : 21;
u32 chpid : 8;
u32 cmcv : 5;
- u32 : 11;
+ u32 : 7;
+ u32 cmgp : 4;
u32 cmgq : 8;
u32 cmg : 8;
- u32 zeroes3;
+ u32 : 16;
+ u32 cmgs : 16;
u32 data[NR_MEASUREMENT_CHARS];
} *scmc_area;
chp->shared = -1;
chp->cmg = -1;
+ chp->extended = 0;
+ chp->speed = 0;
if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
return -EINVAL;
@@ -1076,10 +1140,8 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
chp->cmg = scmc_area->cmg;
chp->shared = scmc_area->shared;
- if (chp->cmg != 2 && chp->cmg != 3) {
- /* No cmg-dependent data. */
- goto out;
- }
+ chp->extended = scmc_area->extended;
+ chp->speed = scmc_get_speed(scmc_area->cmgs, scmc_area->cmgp);
chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
(struct cmg_chars *) &scmc_area->data);
out:
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 03602295f335..24cd65dbc5a7 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -22,6 +22,11 @@ struct cmg_entry {
u32 values[NR_MEASUREMENT_ENTRIES];
};
+#define NR_EXT_MEASUREMENT_ENTRIES 16
+struct cmg_ext_entry {
+ u32 values[NR_EXT_MEASUREMENT_ENTRIES];
+};
+
struct channel_path_desc_fmt1 {
u8 flags;
u8 lsn;
diff --git a/drivers/s390/cio/cio_inject.c b/drivers/s390/cio/cio_inject.c
index 8613fa937237..a2e771ebae8e 100644
--- a/drivers/s390/cio/cio_inject.c
+++ b/drivers/s390/cio/cio_inject.c
@@ -95,7 +95,7 @@ static ssize_t crw_inject_write(struct file *file, const char __user *buf,
return -EINVAL;
}
- buffer = vmemdup_user(buf, lbuf);
+ buffer = memdup_user_nul(buf, lbuf);
if (IS_ERR(buffer))
return -ENOMEM;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 1d68db1a3d4e..781f84901256 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -309,7 +309,7 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,
{
struct subchannel *sch = to_subchannel(dev);
- return sprintf(buf, "%01x\n", sch->st);
+ return sysfs_emit(buf, "%01x\n", sch->st);
}
static DEVICE_ATTR_RO(type);
@@ -319,7 +319,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct subchannel *sch = to_subchannel(dev);
- return sprintf(buf, "css:t%01X\n", sch->st);
+ return sysfs_emit(buf, "css:t%01X\n", sch->st);
}
static DEVICE_ATTR_RO(modalias);
@@ -345,7 +345,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
+ len = sysfs_emit(buf, "%s\n", sch->driver_override);
device_unlock(dev);
return len;
}
@@ -396,8 +396,8 @@ static ssize_t pimpampom_show(struct device *dev,
struct subchannel *sch = to_subchannel(dev);
struct pmcw *pmcw = &sch->schib.pmcw;
- return sprintf(buf, "%02x %02x %02x\n",
- pmcw->pim, pmcw->pam, pmcw->pom);
+ return sysfs_emit(buf, "%02x %02x %02x\n",
+ pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);
@@ -881,7 +881,7 @@ static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
if (!css->id_valid)
return -EINVAL;
- return sprintf(buf, "%x\n", css->cssid);
+ return sysfs_emit(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);
@@ -904,7 +904,7 @@ static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
int ret;
mutex_lock(&css->mutex);
- ret = sprintf(buf, "%x\n", css->cm_enabled);
+ ret = sysfs_emit(buf, "%x\n", css->cm_enabled);
mutex_unlock(&css->mutex);
return ret;
}
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ea5550554297..c2b175592bb7 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -35,6 +35,15 @@
#define SNID_STATE3_SINGLE_PATH 0
/*
+ * Miscellaneous constants
+ */
+
+#define CSS_NUM_CUB_PAGES 2
+#define CSS_CUES_PER_PAGE 128
+#define CSS_NUM_ECUB_PAGES 4
+#define CSS_ECUES_PER_PAGE 64
+
+/*
* Conditions used to specify which subchannels need evaluation
*/
enum css_eval_cond {
@@ -122,8 +131,8 @@ struct channel_subsystem {
struct mutex mutex;
/* channel measurement related */
int cm_enabled;
- void *cub_addr1;
- void *cub_addr2;
+ void *cub[CSS_NUM_CUB_PAGES];
+ void *ecub[CSS_NUM_ECUB_PAGES];
/* for orphaned ccw devices */
struct subchannel *pseudo_subchannel;
};
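The page counts line up with the entry sizes: 2 CUB pages x 128 entries cover
all 256 CHPIDs at 32 bytes per struct cmg_entry (assuming the existing
NR_MEASUREMENT_ENTRIES of 8), and 4 ECUB pages x 64 entries do the same at
64 bytes per struct cmg_ext_entry — each array exactly filling its 4 KiB pages.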
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f95d12345d98..920f550bc313 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
- spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else {
+ if (ret) {
+ spin_unlock_irq(cdev->ccwlock);
CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
@@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
- spin_lock_irq(cdev->ccwlock);
+ /* Wait until a final state is reached */
+ while (!dev_fsm_final_state(cdev)) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ spin_lock_irq(cdev->ccwlock);
+ }
/* Check if online processing was successful */
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 65d8b2cfd626..42791fa0b80e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -504,6 +504,11 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
+ CIO_MSG_EVENT(2, "fakeirb: deliver device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
+ cdev->private->intparm,
+ cdev->private->flags.fake_irb);
create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 40c97f873075..acd6790dba4d 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -208,6 +208,10 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
@@ -551,6 +555,10 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_TM_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3d9f0834c78b..a1cb39f4b7a2 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
lgr_info_log();
}
-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
- int dstat)
+static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+ int dstat, int dcc)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
goto error;
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
+ if (dcc == 1)
+ return -EAGAIN;
if (!(dstat & DEV_STAT_DEV_END))
goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
- return;
+ return 0;
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ return -EIO;
}
/* qdio interrupt handler */
@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- int cstat, dstat;
+ int cstat, dstat, rc, dcc;
if (!intparm || !irq_ptr) {
ccw_device_get_schid(cdev, &schid);
@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
+ dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
+ rc = 0;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- qdio_establish_handle_irq(irq_ptr, cstat, dstat);
+ rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cstat || dstat)
qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
+ else if (dcc == 1)
+ rc = -EAGAIN;
break;
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON_ONCE(1);
}
+
+ if (rc == -EAGAIN) {
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
+ rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
+ if (!rc)
+ return;
+ DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ }
+
wake_up(&cdev->private->wait_q);
}
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 86993de25345..a4c5c6736b31 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -50,7 +50,7 @@ DECLARE_EVENT_CLASS(s390_class_schib,
__entry->devno = schib->pmcw.dev;
__entry->schib = *schib;
__entry->pmcw_ena = schib->pmcw.ena;
- __entry->pmcw_st = schib->pmcw.ena;
+ __entry->pmcw_st = schib->pmcw.st;
__entry->pmcw_dnv = schib->pmcw.dnv;
__entry->pmcw_dev = schib->pmcw.dev;
__entry->pmcw_lpm = schib->pmcw.lpm;
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 0edacd101c12..bd94811fd9f1 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -4,7 +4,7 @@
#
ap-objs := ap_bus.o ap_card.o ap_queue.o
-obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
+obj-$(CONFIG_AP) += ap.o
# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index cce0bafd4c92..c20b45092079 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -39,13 +39,15 @@
#include <linux/ctype.h>
#include <linux/module.h>
#include <asm/uv.h>
+#include <asm/chsc.h>
#include "ap_bus.h"
#include "ap_debug.h"
-/*
- * Module parameters; note though this file itself isn't modular.
- */
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Adjunct Processor Bus driver");
+MODULE_LICENSE("GPL");
+
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
static DEFINE_SPINLOCK(ap_domain_lock);
module_param_named(domain, ap_domain_index, int, 0440);
@@ -90,8 +92,9 @@ static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
/* completion for APQN bindings complete */
static DECLARE_COMPLETION(ap_apqn_bindings_complete);
-static struct ap_config_info *ap_qci_info;
-static struct ap_config_info *ap_qci_info_old;
+static struct ap_config_info qci[2];
+static struct ap_config_info *const ap_qci_info = &qci[0];
+static struct ap_config_info *const ap_qci_info_old = &qci[1];
/*
* AP bus related debug feature things.
@@ -203,9 +206,7 @@ static int ap_apft_available(void)
*/
static inline int ap_qact_available(void)
{
- if (ap_qci_info)
- return ap_qci_info->qact;
- return 0;
+ return ap_qci_info->qact;
}
/*
@@ -215,9 +216,7 @@ static inline int ap_qact_available(void)
*/
int ap_sb_available(void)
{
- if (ap_qci_info)
- return ap_qci_info->apsb;
- return 0;
+ return ap_qci_info->apsb;
}
/*
@@ -229,23 +228,6 @@ bool ap_is_se_guest(void)
}
EXPORT_SYMBOL(ap_is_se_guest);
-/*
- * ap_fetch_qci_info(): Fetch cryptographic config info
- *
- * Returns the ap configuration info fetched via PQAP(QCI).
- * On success 0 is returned, on failure a negative errno
- * is returned, e.g. if the PQAP(QCI) instruction is not
- * available, the return value will be -EOPNOTSUPP.
- */
-static inline int ap_fetch_qci_info(struct ap_config_info *info)
-{
- if (!ap_qci_available())
- return -EOPNOTSUPP;
- if (!info)
- return -EINVAL;
- return ap_qci(info);
-}
-
/**
* ap_init_qci_info(): Allocate and query qci config info.
* Does also update the static variables ap_max_domain_id
@@ -253,27 +235,12 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info)
*/
static void __init ap_init_qci_info(void)
{
- if (!ap_qci_available()) {
+ if (!ap_qci_available() ||
+ ap_qci(ap_qci_info)) {
AP_DBF_INFO("%s QCI not supported\n", __func__);
return;
}
-
- ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
- if (!ap_qci_info)
- return;
- ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL);
- if (!ap_qci_info_old) {
- kfree(ap_qci_info);
- ap_qci_info = NULL;
- return;
- }
- if (ap_fetch_qci_info(ap_qci_info) != 0) {
- kfree(ap_qci_info);
- kfree(ap_qci_info_old);
- ap_qci_info = NULL;
- ap_qci_info_old = NULL;
- return;
- }
+ memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
if (ap_qci_info->apxa) {
@@ -288,8 +255,6 @@ static void __init ap_init_qci_info(void)
__func__, ap_max_domain_id);
}
}
-
- memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
}
/*
@@ -312,7 +277,7 @@ static inline int ap_test_config_card_id(unsigned int id)
{
if (id > ap_max_adapter_id)
return 0;
- if (ap_qci_info)
+ if (ap_qci_info->flags)
return ap_test_config(ap_qci_info->apm, id);
return 1;
}
@@ -329,7 +294,7 @@ int ap_test_config_usage_domain(unsigned int domain)
{
if (domain > ap_max_domain_id)
return 0;
- if (ap_qci_info)
+ if (ap_qci_info->flags)
return ap_test_config(ap_qci_info->aqm, domain);
return 1;
}
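With ap_qci_info now pointing at a static, zero-initialized struct instead of a kzalloc'd one, a NULL pointer can no longer signal "QCI unavailable"; the checks above rely on the flags word staying zero when ap_qci() was never run or failed. A hedged one-line restatement of that invariant (qci_info_available() is a hypothetical name, not part of the patch):

	static inline bool qci_info_available(void)
	{
		/* zero flags <=> PQAP(QCI) failed or was never executed */
		return ap_qci_info->flags != 0;
	}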
@@ -1061,22 +1026,24 @@ EXPORT_SYMBOL(ap_bus_force_rescan);
/*
* A config change has happened, force an ap bus rescan.
*/
-void ap_bus_cfg_chg(void)
+static int ap_bus_cfg_chg(struct notifier_block *nb,
+ unsigned long action, void *data)
{
+ if (action != CHSC_NOTIFY_AP_CFG)
+ return NOTIFY_DONE;
+
pr_debug("%s config change, forcing bus rescan\n", __func__);
ap_bus_force_rescan();
+
+ return NOTIFY_OK;
}
-/*
- * hex2bitmap() - parse hex mask string and set bitmap.
- * Valid strings are "0x012345678" with at least one valid hex number.
- * Rest of the bitmap to the right is padded with 0. No spaces allowed
- * within the string, the leading 0x may be omitted.
- * Returns the bitmask with exactly the bits set as given by the hex
- * string (both in big endian order).
- */
-static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
+static struct notifier_block ap_bus_nb = {
+ .notifier_call = ap_bus_cfg_chg,
+};
+
+int ap_hex2bitmap(const char *str, unsigned long *bitmap, int bits)
{
int i, n, b;
@@ -1103,6 +1070,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
return -EINVAL;
return 0;
}
+EXPORT_SYMBOL(ap_hex2bitmap);
/*
* modify_bitmap() - parse bitmask argument and modify an existing
@@ -1168,7 +1136,7 @@ static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits,
rc = modify_bitmap(str, newmap, bits);
} else {
memset(newmap, 0, size);
- rc = hex2bitmap(str, newmap, bits);
+ rc = ap_hex2bitmap(str, newmap, bits);
}
return rc;
}
@@ -1234,7 +1202,7 @@ static BUS_ATTR_RW(ap_domain);
static ssize_t ap_control_domain_mask_show(const struct bus_type *bus, char *buf)
{
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return sysfs_emit(buf, "not supported\n");
return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
@@ -1248,7 +1216,7 @@ static BUS_ATTR_RO(ap_control_domain_mask);
static ssize_t ap_usage_domain_mask_show(const struct bus_type *bus, char *buf)
{
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return sysfs_emit(buf, "not supported\n");
return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
@@ -1262,7 +1230,7 @@ static BUS_ATTR_RO(ap_usage_domain_mask);
static ssize_t ap_adapter_mask_show(const struct bus_type *bus, char *buf)
{
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return sysfs_emit(buf, "not supported\n");
return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
@@ -1595,7 +1563,7 @@ static ssize_t features_show(const struct bus_type *bus, char *buf)
{
int n = 0;
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return sysfs_emit(buf, "-\n");
if (ap_qci_info->apsc)
@@ -2158,11 +2126,11 @@ static inline void ap_scan_adapter(int ap)
*/
static bool ap_get_configuration(void)
{
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return false;
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
- ap_fetch_qci_info(ap_qci_info);
+ ap_qci(ap_qci_info);
return memcmp(ap_qci_info, ap_qci_info_old,
sizeof(struct ap_config_info)) != 0;
@@ -2179,7 +2147,7 @@ static bool ap_config_has_new_aps(void)
unsigned long m[BITS_TO_LONGS(AP_DEVICES)];
- if (!ap_qci_info)
+ if (!ap_qci_info->flags)
return false;
bitmap_andnot(m, (unsigned long *)ap_qci_info->apm,
@@ -2200,7 +2168,7 @@ static bool ap_config_has_new_doms(void)
{
unsigned long m[BITS_TO_LONGS(AP_DOMAINS)];
- if (!ap_qci_info)
+ if (!ap_qci_info->flags)
return false;
bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm,
@@ -2310,7 +2278,82 @@ static void ap_scan_bus_wq_callback(struct work_struct *unused)
}
}
-static int __init ap_debug_init(void)
+static inline void __exit ap_async_exit(void)
+{
+ if (ap_thread_flag)
+ ap_poll_thread_stop();
+ chsc_notifier_unregister(&ap_bus_nb);
+ cancel_work(&ap_scan_bus_work);
+ hrtimer_cancel(&ap_poll_timer);
+ timer_delete(&ap_scan_bus_timer);
+}
+
+static inline int __init ap_async_init(void)
+{
+ int rc;
+
+ /* Setup the AP bus rescan timer. */
+ timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);
+
+ /*
+ * Setup the high resolution poll timer.
+ * If we are running under z/VM adjust polling to z/VM polling rate.
+ */
+ if (MACHINE_IS_VM)
+ poll_high_timeout = 1500000;
+ hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ap_poll_timer.function = ap_poll_timeout;
+
+ queue_work(system_long_wq, &ap_scan_bus_work);
+
+ rc = chsc_notifier_register(&ap_bus_nb);
+ if (rc)
+ goto out;
+
+ /* Start the low priority AP bus poll thread. */
+ if (!ap_thread_flag)
+ return 0;
+
+ rc = ap_poll_thread_start();
+ if (rc)
+ goto out_notifier;
+
+ return 0;
+
+out_notifier:
+ chsc_notifier_unregister(&ap_bus_nb);
+out:
+ cancel_work(&ap_scan_bus_work);
+ hrtimer_cancel(&ap_poll_timer);
+ timer_delete(&ap_scan_bus_timer);
+ return rc;
+}
+
+static inline void ap_irq_exit(void)
+{
+ if (ap_irq_flag)
+ unregister_adapter_interrupt(&ap_airq);
+}
+
+static inline int __init ap_irq_init(void)
+{
+ int rc;
+
+ if (!ap_interrupts_available() || !ap_useirq)
+ return 0;
+
+ rc = register_adapter_interrupt(&ap_airq);
+ ap_irq_flag = (rc == 0);
+
+ return rc;
+}
+
+static inline void ap_debug_exit(void)
+{
+ debug_unregister(ap_dbf_info);
+}
+
+static inline int __init ap_debug_init(void)
{
ap_dbf_info = debug_register("ap", 2, 1,
AP_DBF_MAX_SPRINTF_ARGS * sizeof(long));
@@ -2378,12 +2421,6 @@ static int __init ap_module_init(void)
ap_domain_index = -1;
}
- /* enable interrupts if available */
- if (ap_interrupts_available() && ap_useirq) {
- rc = register_adapter_interrupt(&ap_airq);
- ap_irq_flag = (rc == 0);
- }
-
/* Create /sys/bus/ap. */
rc = bus_register(&ap_bus_type);
if (rc)
@@ -2396,38 +2433,37 @@ static int __init ap_module_init(void)
goto out_bus;
ap_root_device->bus = &ap_bus_type;
- /* Setup the AP bus rescan timer. */
- timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);
-
- /*
- * Setup the high resolution poll timer.
- * If we are running under z/VM adjust polling to z/VM polling rate.
- */
- if (MACHINE_IS_VM)
- poll_high_timeout = 1500000;
- hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ap_poll_timer.function = ap_poll_timeout;
-
- /* Start the low priority AP bus poll thread. */
- if (ap_thread_flag) {
- rc = ap_poll_thread_start();
- if (rc)
- goto out_work;
- }
+ /* enable interrupts if available */
+ rc = ap_irq_init();
+ if (rc)
+ goto out_device;
- queue_work(system_long_wq, &ap_scan_bus_work);
+ /* Setup asynchronous work (timers, workqueue, etc). */
+ rc = ap_async_init();
+ if (rc)
+ goto out_irq;
return 0;
-out_work:
- hrtimer_cancel(&ap_poll_timer);
+out_irq:
+ ap_irq_exit();
+out_device:
root_device_unregister(ap_root_device);
out_bus:
bus_unregister(&ap_bus_type);
out:
- if (ap_irq_flag)
- unregister_adapter_interrupt(&ap_airq);
- kfree(ap_qci_info);
+ ap_debug_exit();
return rc;
}
-device_initcall(ap_module_init);
+
+static void __exit ap_module_exit(void)
+{
+ ap_async_exit();
+ ap_irq_exit();
+ root_device_unregister(ap_root_device);
+ bus_unregister(&ap_bus_type);
+ ap_debug_exit();
+}
+
+module_init(ap_module_init);
+module_exit(ap_module_exit);
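Converting from device_initcall() to module_init()/module_exit() is what makes building the AP bus as a module viable: ap_module_exit() unwinds what ap_module_init() set up, in reverse order (async work, interrupts, root device, bus, debug facility), matching the new ap_async_init()/ap_async_exit() and ap_irq_init()/ap_irq_exit() pairs above.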
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 59c7ed49aa02..fdbc6fdfdf57 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -344,6 +344,28 @@ int ap_parse_mask_str(const char *str,
struct mutex *lock);
/*
+ * ap_hex2bitmap() - Convert a string containing a hexadecimal number (str)
+ * into a bitmap (bitmap) with bits set that correspond to the bits represented
+ * by the hex string. Input and output data is in big endian order.
+ *
+ * str - Input hex string of format "0x1234abcd". The leading "0x" is optional.
+ * At least one digit is required. Must not represent more bits than the
+ * bits parameter allows, i.e. at most bits / 4 hex digits.
+ *
+ * bitmap - Pointer to a bitmap. Upon successful completion of this function,
+ * this bitmap will have bits set to match the value of str. If bitmap is longer
+ * than str, then the rightmost bits of bitmap are padded with zeros. Must be
+ * large enough to hold the number of bits represented by the bits parameter.
+ *
+ * bits - Length, in bits, of the bitmap represented by str. Must be a multiple
+ * of 8.
+ *
+ * Returns: 0 on success
+ * -EINVAL if the str format is invalid or bits is not a multiple of 8.
+ */
+int ap_hex2bitmap(const char *str, unsigned long *bitmap, int bits);
+
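For illustration, a hedged usage sketch of the exported helper (mask value chosen arbitrarily); because the string is left-anchored and right-padded, "0xff" marks the first eight positions of a 256-bit map in the AP (big endian, set_bit_inv) bit order:

	DECLARE_BITMAP(apm, 256);

	if (ap_hex2bitmap("0xff", apm, 256))	/* sets inv bits 0..7, rest zero */
		pr_warn("invalid hex mask string\n");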
+/*
* Interface to wait for the AP bus to have done one initial ap bus
* scan and all detected APQNs have been bound to device drivers.
* If these both conditions are not fulfilled, this function blocks
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 6e4e8d324a6d..1f647ffd6f4d 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -708,7 +708,7 @@ static ssize_t ap_functions_show(struct device *dev,
static DEVICE_ATTR_RO(ap_functions);
-#ifdef CONFIG_ZCRYPT_DEBUG
+#ifdef CONFIG_AP_DEBUG
static ssize_t states_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -820,7 +820,7 @@ static struct attribute *ap_queue_dev_attrs[] = {
&dev_attr_config.attr,
&dev_attr_chkstop.attr,
&dev_attr_ap_functions.attr,
-#ifdef CONFIG_ZCRYPT_DEBUG
+#ifdef CONFIG_AP_DEBUG
&dev_attr_states.attr,
&dev_attr_last_err_rc.attr,
#endif
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index fc169bc61593..9f76f2d7b66e 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -794,10 +794,11 @@ err_put_vdev:
static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
struct vfio_ap_queue *q)
{
- if (q) {
- q->matrix_mdev = matrix_mdev;
- hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
- }
+ if (!q || vfio_ap_mdev_get_queue(matrix_mdev, q->apqn))
+ return;
+
+ q->matrix_mdev = matrix_mdev;
+ hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
}
static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
@@ -1118,20 +1119,29 @@ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
}
}
-static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apid)
+static void vfio_ap_mdev_hot_unplug_adapters(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apids)
{
struct vfio_ap_queue *q, *tmpq;
struct list_head qlist;
+ unsigned long apid;
+ bool apcb_update = false;
INIT_LIST_HEAD(&qlist);
- vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);
- if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
- clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
- vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ for_each_set_bit_inv(apid, apids, AP_DEVICES) {
+ vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);
+
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ apcb_update = true;
+ }
}
+ /* Only update the apcb if needed, to avoid impacting the guest */
+ if (apcb_update)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
vfio_ap_mdev_reset_qlist(&qlist);
list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
@@ -1140,6 +1150,16 @@ static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
}
}
+static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ DECLARE_BITMAP(apids, AP_DEVICES);
+
+ bitmap_zero(apids, AP_DEVICES);
+ set_bit_inv(apid, apids);
+ vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, apids);
+}
+
/**
* unassign_adapter_store - parses the APID from @buf and clears the
* corresponding bit in the mediated matrix device's APM
@@ -1300,20 +1320,29 @@ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
}
}
-static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apqi)
+static void vfio_ap_mdev_hot_unplug_domains(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apqis)
{
struct vfio_ap_queue *q, *tmpq;
struct list_head qlist;
+ unsigned long apqi;
+ bool apcb_update = false;
INIT_LIST_HEAD(&qlist);
- vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);
- if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
- clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
- vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ for_each_set_bit_inv(apqi, apqis, AP_DOMAINS) {
+ vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);
+
+ if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
+ apcb_update = true;
+ }
}
+ /* Only update the apcb if needed, to avoid impacting the guest */
+ if (apcb_update)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
vfio_ap_mdev_reset_qlist(&qlist);
list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
@@ -1322,6 +1351,16 @@ static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
}
}
+static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+ DECLARE_BITMAP(apqis, AP_DOMAINS);
+
+ bitmap_zero(apqis, AP_DOMAINS);
+ set_bit_inv(apqi, apqis);
+ vfio_ap_mdev_hot_unplug_domains(matrix_mdev, apqis);
+}
+
/**
* unassign_domain_store - parses the APQI from @buf and clears the
* corresponding bit in the mediated matrix device's AQM
@@ -1570,6 +1609,158 @@ static ssize_t guest_matrix_show(struct device *dev,
}
static DEVICE_ATTR_RO(guest_matrix);
+static ssize_t write_ap_bitmap(unsigned long *bitmap, char *buf, int offset, char sep)
+{
+ return sysfs_emit_at(buf, offset, "0x%016lx%016lx%016lx%016lx%c",
+ bitmap[0], bitmap[1], bitmap[2], bitmap[3], sep);
+}
+
+static ssize_t ap_config_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+ int idx = 0;
+
+ idx += write_ap_bitmap(matrix_mdev->matrix.apm, buf, idx, ',');
+ idx += write_ap_bitmap(matrix_mdev->matrix.aqm, buf, idx, ',');
+ idx += write_ap_bitmap(matrix_mdev->matrix.adm, buf, idx, '\n');
+
+ return idx;
+}
+
+/* Number of characters needed for a complete hex mask representing the bits in .. */
+#define AP_DEVICES_STRLEN (AP_DEVICES / 4 + 3)
+#define AP_DOMAINS_STRLEN (AP_DOMAINS / 4 + 3)
+#define AP_CONFIG_STRLEN (AP_DEVICES_STRLEN + 2 * AP_DOMAINS_STRLEN)
+
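The arithmetic behind the "+ 3": with AP_DEVICES and AP_DOMAINS both 256 (as the vfio_ap_private.h hunk below implies), each mask needs 256 / 4 = 64 hex digits plus the leading "0x" and a trailing ',' or '\n' separator, giving 67 characters per mask and 201 for a full ap_config line.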
+static int parse_bitmap(char **strbufptr, unsigned long *bitmap, int nbits)
+{
+ char *curmask;
+
+ curmask = strsep(strbufptr, ",\n");
+ if (!curmask)
+ return -EINVAL;
+
+ bitmap_clear(bitmap, 0, nbits);
+ return ap_hex2bitmap(curmask, bitmap, nbits);
+}
+
+static int ap_matrix_overflow_check(struct ap_matrix_mdev *matrix_mdev)
+{
+ unsigned long bit;
+
+ for_each_set_bit_inv(bit, matrix_mdev->matrix.apm, AP_DEVICES) {
+ if (bit > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+ }
+
+ for_each_set_bit_inv(bit, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ if (bit > matrix_mdev->matrix.aqm_max)
+ return -ENODEV;
+ }
+
+ for_each_set_bit_inv(bit, matrix_mdev->matrix.adm, AP_DOMAINS) {
+ if (bit > matrix_mdev->matrix.adm_max)
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ap_matrix_copy(struct ap_matrix *dst, struct ap_matrix *src)
+{
+ /* This check works around false positive gcc -Wstringop-overread */
+ if (!src)
+ return;
+
+ bitmap_copy(dst->apm, src->apm, AP_DEVICES);
+ bitmap_copy(dst->aqm, src->aqm, AP_DOMAINS);
+ bitmap_copy(dst->adm, src->adm, AP_DOMAINS);
+}
+
+static ssize_t ap_config_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+ struct ap_matrix m_new, m_old, m_added, m_removed;
+ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ unsigned long newbit;
+ char *newbuf, *rest;
+ int rc = count;
+ bool do_update;
+
+ newbuf = kstrndup(buf, AP_CONFIG_STRLEN, GFP_KERNEL);
+ if (!newbuf)
+ return -ENOMEM;
+ rest = newbuf;
+
+ mutex_lock(&ap_perms_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
+
+ /* Save old state */
+ ap_matrix_copy(&m_old, &matrix_mdev->matrix);
+ if (parse_bitmap(&rest, m_new.apm, AP_DEVICES) ||
+ parse_bitmap(&rest, m_new.aqm, AP_DOMAINS) ||
+ parse_bitmap(&rest, m_new.adm, AP_DOMAINS)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ bitmap_andnot(m_removed.apm, m_old.apm, m_new.apm, AP_DEVICES);
+ bitmap_andnot(m_removed.aqm, m_old.aqm, m_new.aqm, AP_DOMAINS);
+ bitmap_andnot(m_added.apm, m_new.apm, m_old.apm, AP_DEVICES);
+ bitmap_andnot(m_added.aqm, m_new.aqm, m_old.aqm, AP_DOMAINS);
+
+ /* Need new bitmaps in matrix_mdev for validation */
+ ap_matrix_copy(&matrix_mdev->matrix, &m_new);
+
+ /* Ensure new state is valid, else undo new state */
+ rc = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (rc) {
+ ap_matrix_copy(&matrix_mdev->matrix, &m_old);
+ goto out;
+ }
+ rc = ap_matrix_overflow_check(matrix_mdev);
+ if (rc) {
+ ap_matrix_copy(&matrix_mdev->matrix, &m_old);
+ goto out;
+ }
+ rc = count;
+
+ /* Need old bitmaps in matrix_mdev for unplug/unlink */
+ ap_matrix_copy(&matrix_mdev->matrix, &m_old);
+
+ /* Unlink removed adapters/domains */
+ vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, m_removed.apm);
+ vfio_ap_mdev_hot_unplug_domains(matrix_mdev, m_removed.aqm);
+
+ /* Need new bitmaps in matrix_mdev for linking new adapters/domains */
+ ap_matrix_copy(&matrix_mdev->matrix, &m_new);
+
+ /* Link newly added adapters */
+ for_each_set_bit_inv(newbit, m_added.apm, AP_DEVICES)
+ vfio_ap_mdev_link_adapter(matrix_mdev, newbit);
+
+ for_each_set_bit_inv(newbit, m_added.aqm, AP_DOMAINS)
+ vfio_ap_mdev_link_domain(matrix_mdev, newbit);
+
+ /* filter resources not bound to vfio-ap */
+ do_update = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
+ do_update |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ /* Apply changes to the shadow apcb if anything changed */
+ if (do_update) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ reset_queues_for_apids(matrix_mdev, apm_filtered);
+ }
+out:
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_perms_mutex);
+ kfree(newbuf);
+ return rc;
+}
+static DEVICE_ATTR_RW(ap_config);
+
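Taken together with ap_hex2bitmap()'s right-padding, a write to the new ap_config attribute need not spell out all 64 digits per mask: for example (hypothetical assignment), the string "0x80,0x40,0x40" would assign adapter 0, usage domain 1 and control domain 1 in one atomic update, while reading the attribute back always yields the three full-width masks emitted by write_ap_bitmap().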
static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_assign_adapter.attr,
&dev_attr_unassign_adapter.attr,
@@ -1577,6 +1768,7 @@ static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_unassign_domain.attr,
&dev_attr_assign_control_domain.attr,
&dev_attr_unassign_control_domain.attr,
+ &dev_attr_ap_config.attr,
&dev_attr_control_domains.attr,
&dev_attr_matrix.attr,
&dev_attr_guest_matrix.attr,
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 98d37aa27044..437a161c8659 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -75,11 +75,11 @@ extern struct ap_matrix_dev *matrix_dev;
*/
struct ap_matrix {
unsigned long apm_max;
- DECLARE_BITMAP(apm, 256);
+ DECLARE_BITMAP(apm, AP_DEVICES);
unsigned long aqm_max;
- DECLARE_BITMAP(aqm, 256);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
unsigned long adm_max;
- DECLARE_BITMAP(adm, 256);
+ DECLARE_BITMAP(adm, AP_DOMAINS);
};
/**
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 0a3a678ffc7e..6087547328ce 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -658,7 +658,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
- rc = -EAGAIN;
+ rc = -EBUSY;
else
rc = -EIO;
goto out;
@@ -1263,7 +1263,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
- rc = -EAGAIN;
+ rc = -EBUSY;
else
rc = -EIO;
goto out;
@@ -1426,7 +1426,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
- rc = -EAGAIN;
+ rc = -EBUSY;
else
rc = -EIO;
goto out;
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index eb7f5489ccf9..9bcf8fc69ebe 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -556,13 +556,29 @@ static int check_reply_pl(const u8 *pl, const char *func)
pl += 2;
ret = *((u32 *)pl);
if (ret != 0) {
- ZCRYPT_DBF_ERR("%s return value 0x%04x != 0\n", func, ret);
+ ZCRYPT_DBF_ERR("%s return value 0x%08x != 0\n", func, ret);
return -EIO;
}
return 0;
}
+/* Check ep11 reply cprb, return 0 or suggested errno value. */
+static int check_reply_cprb(const struct ep11_cprb *rep, const char *func)
+{
+ /* check ep11 reply return code field */
+ if (rep->ret_code) {
+ ZCRYPT_DBF_ERR("%s ep11 reply ret_code=0x%08x\n", __func__,
+ rep->ret_code);
+ if (rep->ret_code == 0x000c0003)
+ return -EBUSY;
+ else
+ return -EIO;
+ }
+
+ return 0;
+}
+
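The 0x000c0003 value appears to be the EP11 module-busy return code (an assumption drawn from the -EBUSY mapping here, mirroring how the CCA hunks above translate rtcode 8 / rscode 2290), so callers get a retryable errno instead of a hard -EIO.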
/*
* Helper function which does an ep11 query with given query type.
*/
@@ -627,6 +643,12 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
@@ -877,6 +899,12 @@ static int _ep11_genaeskey(u16 card, u16 domain,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
@@ -1028,6 +1056,12 @@ static int ep11_cryptsingle(u16 card, u16 domain,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
@@ -1185,6 +1219,12 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
@@ -1339,6 +1379,12 @@ static int _ep11_wrapkey(u16 card, u16 domain,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 2c8e964425dc..43778b088ffa 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -292,13 +292,16 @@ out:
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
- dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
- dmb->cpu_addr, dmb->dma_addr);
+ dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
+ DMA_FROM_DEVICE);
+ folio_put(virt_to_folio(dmb->cpu_addr));
}
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
+ struct folio *folio;
unsigned long bit;
+ int rc;
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
@@ -315,14 +318,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
- dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
- &dmb->dma_addr,
- GFP_KERNEL | __GFP_NOWARN |
- __GFP_NOMEMALLOC | __GFP_NORETRY);
- if (!dmb->cpu_addr)
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_NORETRY, get_order(dmb->dmb_len));
- return dmb->cpu_addr ? 0 : -ENOMEM;
+ if (!folio) {
+ rc = -ENOMEM;
+ goto out_bit;
+ }
+
+ dmb->cpu_addr = folio_address(folio);
+ dmb->dma_addr = dma_map_page(&ism->pdev->dev,
+ virt_to_page(dmb->cpu_addr), 0,
+ dmb->dmb_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ folio_put(folio);
+out_bit:
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ return rc;
}
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
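Switching from dma_alloc_coherent() to folio_alloc() + dma_map_page() turns the DMB into streaming DMA memory: the buffer now comes straight from the page allocator (so it is released with folio_put(), as in ism_free_dmb() above), and the DMA_FROM_DEVICE mapping is torn down before the folio is freed.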
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f0b8b709649f..a3adaec5504e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -364,30 +364,33 @@ out:
return rc;
}
+static void qeth_free_cq(struct qeth_card *card)
+{
+ if (card->qdio.c_q) {
+ qeth_free_qdio_queue(card->qdio.c_q);
+ card->qdio.c_q = NULL;
+ }
+}
+
static int qeth_alloc_cq(struct qeth_card *card)
{
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqon");
- card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
- dev_err(&card->gdev->dev, "Failed to create completion queue\n");
- return -ENOMEM;
+ card->qdio.c_q = qeth_alloc_qdio_queue();
+ if (!card->qdio.c_q) {
+ dev_err(&card->gdev->dev,
+ "Failed to create completion queue\n");
+ return -ENOMEM;
+ }
}
} else {
QETH_CARD_TEXT(card, 2, "nocq");
- card->qdio.c_q = NULL;
+ qeth_free_cq(card);
}
return 0;
}
-static void qeth_free_cq(struct qeth_card *card)
-{
- if (card->qdio.c_q) {
- qeth_free_qdio_queue(card->qdio.c_q);
- card->qdio.c_q = NULL;
- }
-}
-
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
int delayed)
{
@@ -2628,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "allcqdbf");
+ /* completion */
+ if (qeth_alloc_cq(card))
+ goto out_err;
+
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
@@ -2663,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
}
- /* completion */
- if (qeth_alloc_cq(card))
- goto out_freeoutq;
-
return 0;
out_freeoutq:
@@ -2677,6 +2680,8 @@ out_freeoutq:
qeth_free_buffer_pool(card);
out_buffer_pool:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+ qeth_free_cq(card);
+out_err:
return -ENOMEM;
}
@@ -2684,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
{
int i, j;
+ qeth_free_cq(card);
+
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
- qeth_free_cq(card);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (card->qdio.in_q->bufs[j].rx_skb) {
consume_skb(card->qdio.in_q->bufs[j].rx_skb);
@@ -3742,24 +3748,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
- int rc;
-
- if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
- rc = -1;
- goto out;
- } else {
- if (card->options.cq == cq) {
- rc = 0;
- goto out;
- }
-
- qeth_free_qdio_queues(card);
- card->options.cq = cq;
- rc = 0;
- }
-out:
- return rc;
+ if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+ return -1;
+ card->options.cq = cq;
+ return 0;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
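qeth_configure_cq() now only records the requested mode; allocating or releasing the completion queue happens on the next qeth_alloc_qdio_queues() call, where qeth_alloc_cq() runs first and either reuses an existing c_q, allocates a new one, or frees it when CQ support is disabled.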
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 097dfe4b620d..35f8e00850d6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1797,7 +1797,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
if (dev_is_sata(device)) {
struct ata_link *link = &device->sata_dev.ap->link;
- rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
+ rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
smp_ata_check_ready_type);
} else {
msleep(2000);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 7d2a33514538..34f96cc35342 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2244,7 +2244,15 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
(sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
- ts->stat = SAS_PROTO_RESPONSE;
+ if (task->ata_task.use_ncq) {
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+
+ sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR;
+ slot->abort = 1;
+ } else {
+ ts->stat = SAS_PROTO_RESPONSE;
+ }
} else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index d32ad46318cb..231b4d0df85f 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -209,17 +209,13 @@ static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc,
goto out;
}
- strscpy(edev->vendor_id, manufacture_reply->vendor_id,
- SAS_EXPANDER_VENDOR_ID_LEN);
- strscpy(edev->product_id, manufacture_reply->product_id,
- SAS_EXPANDER_PRODUCT_ID_LEN);
- strscpy(edev->product_rev, manufacture_reply->product_rev,
- SAS_EXPANDER_PRODUCT_REV_LEN);
+ memtostr(edev->vendor_id, manufacture_reply->vendor_id);
+ memtostr(edev->product_id, manufacture_reply->product_id);
+ memtostr(edev->product_rev, manufacture_reply->product_rev);
edev->level = manufacture_reply->sas_format & 1;
if (edev->level) {
- strscpy(edev->component_vendor_id,
- manufacture_reply->component_vendor_id,
- SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ memtostr(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id);
tmp = (u8 *)&manufacture_reply->component_id;
edev->component_id = tmp[0] << 8 | tmp[1];
edev->component_revision_id =
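memtostr() (a string.h helper) sizes both source and destination from their array types and NUL-terminates the result, which avoids the strscpy() overread warnings on the fixed-size, possibly non-NUL-terminated firmware fields being converted here.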
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 26e6b3e3af43..dcde55c8ee5d 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1100,7 +1100,7 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
- if (pcnt > app_req.num_ports)
+ if (pcnt >= app_req.num_ports)
break;
app_reply->elem[pcnt].rekey_count =
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 083f94e43fba..82a7e21ddc83 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1909,10 +1909,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
if (fx_type == FXDISC_GET_CONFIG_INFO) {
struct config_info_data *pinfo =
(struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
- strscpy(vha->hw->model_number, pinfo->model_num,
- ARRAY_SIZE(vha->hw->model_number));
- strscpy(vha->hw->model_desc, pinfo->model_description,
- ARRAY_SIZE(vha->hw->model_desc));
+ memtostr(vha->hw->model_number, pinfo->model_num);
+ memtostr(vha->hw->model_desc, pinfo->model_description);
memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
sizeof(vha->hw->mr.symbolic_name));
memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2e28e2360c85..967b6d62bb37 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -635,10 +635,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_queue_add_random(q))
add_disk_randomness(req->q->disk);
- if (!blk_rq_is_passthrough(req)) {
- WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
- cmd->flags &= ~SCMD_INITIALIZED;
- }
+ WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
+ !(cmd->flags & SCMD_INITIALIZED));
+ cmd->flags = 0;
/*
* Calling rcu_barrier() is not necessary here because the
@@ -1870,7 +1869,6 @@ out_put_budget:
case BLK_STS_OK:
break;
case BLK_STS_RESOURCE:
- case BLK_STS_ZONE_RESOURCE:
if (scsi_device_blocked(sdev))
ret = BLK_STS_DEV_RESOURCE;
break;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 58fdf679341d..64c5129044b3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1260,12 +1260,6 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
}
}
- if (req_op(rq) == REQ_OP_ZONE_APPEND) {
- ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
- if (ret)
- goto fail;
- }
-
fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
dix = scsi_prot_sg_count(cmd);
dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
@@ -1348,7 +1342,6 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
return sd_setup_flush_cmnd(cmd);
case REQ_OP_READ:
case REQ_OP_WRITE:
- case REQ_OP_ZONE_APPEND:
return sd_setup_read_write_cmnd(cmd);
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
@@ -3120,6 +3113,7 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
{
struct scsi_device *sdp = sdkp->device;
const struct scsi_io_group_descriptor *desc, *start, *end;
+ u16 permanent_stream_count_old;
struct scsi_sense_hdr sshdr;
struct scsi_mode_data data;
int res;
@@ -3140,12 +3134,13 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
for (desc = start; desc < end; desc++)
if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
break;
+ permanent_stream_count_old = sdkp->permanent_stream_count;
sdkp->permanent_stream_count = desc - start;
if (sdkp->rscs && sdkp->permanent_stream_count < 2)
sd_printk(KERN_INFO, sdkp,
"Unexpected: RSCS has been set and the permanent stream count is %u\n",
sdkp->permanent_stream_count);
- else if (sdkp->permanent_stream_count)
+ else if (sdkp->permanent_stream_count != permanent_stream_count_old)
sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
sdkp->permanent_stream_count);
}
@@ -3979,7 +3974,6 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
ida_free(&sd_index_ida, sdkp->index);
- sd_zbc_free_zone_info(sdkp);
put_device(&sdkp->device->sdev_gendev);
free_opal_dev(sdkp->opal_dev);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5c4285a582b2..49dd600bfa48 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -104,12 +104,6 @@ struct scsi_disk {
* between zone starting LBAs is constant.
*/
u32 zone_starting_lba_gran;
- u32 *zones_wp_offset;
- spinlock_t zones_wp_offset_lock;
- u32 *rev_wp_offset;
- struct mutex rev_mutex;
- struct work_struct zone_wp_offset_work;
- char *zone_wp_update_buf;
#endif
atomic_t openers;
sector_t capacity; /* size in logical blocks */
@@ -245,7 +239,6 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
-void sd_zbc_free_zone_info(struct scsi_disk *sdkp);
int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
@@ -255,13 +248,8 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
- unsigned int nr_blocks);
-
#else /* CONFIG_BLK_DEV_ZONED */
-static inline void sd_zbc_free_zone_info(struct scsi_disk *sdkp) {}
-
static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
return 0;
@@ -285,13 +273,6 @@ static inline unsigned int sd_zbc_complete(struct scsi_cmnd *cmd,
return good_bytes;
}
-static inline blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd,
- sector_t *lba,
- unsigned int nr_blocks)
-{
- return BLK_STS_TARGET;
-}
-
#define sd_zbc_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 26af5ab7d7c1..806036e48abe 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -23,36 +23,6 @@
#define CREATE_TRACE_POINTS
#include "sd_trace.h"
-/**
- * sd_zbc_get_zone_wp_offset - Get zone write pointer offset.
- * @zone: Zone for which to return the write pointer offset.
- *
- * Return: offset of the write pointer from the start of the zone.
- */
-static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
-{
- if (zone->type == ZBC_ZONE_TYPE_CONV)
- return 0;
-
- switch (zone->cond) {
- case BLK_ZONE_COND_IMP_OPEN:
- case BLK_ZONE_COND_EXP_OPEN:
- case BLK_ZONE_COND_CLOSED:
- return zone->wp - zone->start;
- case BLK_ZONE_COND_FULL:
- return zone->len;
- case BLK_ZONE_COND_EMPTY:
- case BLK_ZONE_COND_OFFLINE:
- case BLK_ZONE_COND_READONLY:
- default:
- /*
- * Offline and read-only zones do not have a valid
- * write pointer. Use 0 as for an empty zone.
- */
- return 0;
- }
-}
-
/* Whether or not a SCSI zone descriptor describes a gap zone. */
static bool sd_zbc_is_gap_zone(const u8 buf[64])
{
@@ -121,9 +91,6 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64],
if (ret)
return ret;
- if (sdkp->rev_wp_offset)
- sdkp->rev_wp_offset[idx] = sd_zbc_get_zone_wp_offset(&zone);
-
return 0;
}
@@ -347,123 +314,6 @@ static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
return BLK_STS_OK;
}
-#define SD_ZBC_INVALID_WP_OFST (~0u)
-#define SD_ZBC_UPDATING_WP_OFST (SD_ZBC_INVALID_WP_OFST - 1)
-
-static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
- void *data)
-{
- struct scsi_disk *sdkp = data;
-
- lockdep_assert_held(&sdkp->zones_wp_offset_lock);
-
- sdkp->zones_wp_offset[idx] = sd_zbc_get_zone_wp_offset(zone);
-
- return 0;
-}
-
-/*
- * An attempt to append a zone triggered an invalid write pointer error.
- * Reread the write pointer of the zone(s) in which the append failed.
- */
-static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
-{
- struct scsi_disk *sdkp;
- unsigned long flags;
- sector_t zno;
- int ret;
-
- sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
-
- spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
- for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) {
- if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
- continue;
-
- spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
- ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
- SD_BUF_SIZE,
- zno * sdkp->zone_info.zone_blocks, true);
- spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
- if (!ret)
- sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
- zno, sd_zbc_update_wp_offset_cb,
- sdkp);
- }
- spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
-
- scsi_device_put(sdkp->device);
-}
-
-/**
- * sd_zbc_prepare_zone_append() - Prepare an emulated ZONE_APPEND command.
- * @cmd: the command to setup
- * @lba: the LBA to patch
- * @nr_blocks: the number of LBAs to be written
- *
- * Called from sd_setup_read_write_cmnd() for REQ_OP_ZONE_APPEND.
- * @sd_zbc_prepare_zone_append() handles the necessary zone wrote locking and
- * patching of the lba for an emulated ZONE_APPEND command.
- *
- * In case the cached write pointer offset is %SD_ZBC_INVALID_WP_OFST it will
- * schedule a REPORT ZONES command and return BLK_STS_IOERR.
- */
-blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
- unsigned int nr_blocks)
-{
- struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
- unsigned int wp_offset, zno = blk_rq_zone_no(rq);
- unsigned long flags;
- blk_status_t ret;
-
- ret = sd_zbc_cmnd_checks(cmd);
- if (ret != BLK_STS_OK)
- return ret;
-
- if (!blk_rq_zone_is_seq(rq))
- return BLK_STS_IOERR;
-
- /* Unlock of the write lock will happen in sd_zbc_complete() */
- if (!blk_req_zone_write_trylock(rq))
- return BLK_STS_ZONE_RESOURCE;
-
- spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
- wp_offset = sdkp->zones_wp_offset[zno];
- switch (wp_offset) {
- case SD_ZBC_INVALID_WP_OFST:
- /*
- * We are about to schedule work to update a zone write pointer
- * offset, which will cause the zone append command to be
- * requeued. So make sure that the scsi device does not go away
- * while the work is being processed.
- */
- if (scsi_device_get(sdkp->device)) {
- ret = BLK_STS_IOERR;
- break;
- }
- sdkp->zones_wp_offset[zno] = SD_ZBC_UPDATING_WP_OFST;
- schedule_work(&sdkp->zone_wp_offset_work);
- fallthrough;
- case SD_ZBC_UPDATING_WP_OFST:
- ret = BLK_STS_DEV_RESOURCE;
- break;
- default:
- wp_offset = sectors_to_logical(sdkp->device, wp_offset);
- if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) {
- ret = BLK_STS_IOERR;
- break;
- }
-
- trace_scsi_prepare_zone_append(cmd, *lba, wp_offset);
- *lba += wp_offset;
- }
- spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
- if (ret)
- blk_req_zone_write_unlock(rq);
- return ret;
-}
-
/**
* sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations
* can be RESET WRITE POINTER, OPEN, CLOSE or FINISH.
@@ -504,96 +354,6 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
return BLK_STS_OK;
}
-static bool sd_zbc_need_zone_wp_update(struct request *rq)
-{
- switch (req_op(rq)) {
- case REQ_OP_ZONE_APPEND:
- case REQ_OP_ZONE_FINISH:
- case REQ_OP_ZONE_RESET:
- case REQ_OP_ZONE_RESET_ALL:
- return true;
- case REQ_OP_WRITE:
- case REQ_OP_WRITE_ZEROES:
- return blk_rq_zone_is_seq(rq);
- default:
- return false;
- }
-}
-
-/**
- * sd_zbc_zone_wp_update - Update cached zone write pointer upon cmd completion
- * @cmd: Completed command
- * @good_bytes: Command reply bytes
- *
- * Called from sd_zbc_complete() to handle the update of the cached zone write
- * pointer value in case an update is needed.
- */
-static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
- unsigned int good_bytes)
-{
- int result = cmd->result;
- struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
- unsigned int zno = blk_rq_zone_no(rq);
- enum req_op op = req_op(rq);
- unsigned long flags;
-
- /*
- * If we got an error for a command that needs updating the write
- * pointer offset cache, we must mark the zone wp offset entry as
- * invalid to force an update from disk the next time a zone append
- * command is issued.
- */
- spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
-
- if (result && op != REQ_OP_ZONE_RESET_ALL) {
- if (op == REQ_OP_ZONE_APPEND) {
- /* Force complete completion (no retry) */
- good_bytes = 0;
- scsi_set_resid(cmd, blk_rq_bytes(rq));
- }
-
- /*
- * Force an update of the zone write pointer offset on
- * the next zone append access.
- */
- if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
- sdkp->zones_wp_offset[zno] = SD_ZBC_INVALID_WP_OFST;
- goto unlock_wp_offset;
- }
-
- switch (op) {
- case REQ_OP_ZONE_APPEND:
- trace_scsi_zone_wp_update(cmd, rq->__sector,
- sdkp->zones_wp_offset[zno], good_bytes);
- rq->__sector += sdkp->zones_wp_offset[zno];
- fallthrough;
- case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE:
- if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
- sdkp->zones_wp_offset[zno] +=
- good_bytes >> SECTOR_SHIFT;
- break;
- case REQ_OP_ZONE_RESET:
- sdkp->zones_wp_offset[zno] = 0;
- break;
- case REQ_OP_ZONE_FINISH:
- sdkp->zones_wp_offset[zno] = sd_zbc_zone_sectors(sdkp);
- break;
- case REQ_OP_ZONE_RESET_ALL:
- memset(sdkp->zones_wp_offset, 0,
- sdkp->zone_info.nr_zones * sizeof(unsigned int));
- break;
- default:
- break;
- }
-
-unlock_wp_offset:
- spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
-
- return good_bytes;
-}
-
/**
* sd_zbc_complete - ZBC command post processing.
* @cmd: Completed command
@@ -619,11 +379,7 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
* so be quiet about the error.
*/
rq->rq_flags |= RQF_QUIET;
- } else if (sd_zbc_need_zone_wp_update(rq))
- good_bytes = sd_zbc_zone_wp_update(cmd, good_bytes);
-
- if (req_op(rq) == REQ_OP_ZONE_APPEND)
- blk_req_zone_write_unlock(rq);
+ }
return good_bytes;
}
@@ -780,46 +536,6 @@ static void sd_zbc_print_zones(struct scsi_disk *sdkp)
sdkp->zone_info.zone_blocks);
}
-static int sd_zbc_init_disk(struct scsi_disk *sdkp)
-{
- sdkp->zones_wp_offset = NULL;
- spin_lock_init(&sdkp->zones_wp_offset_lock);
- sdkp->rev_wp_offset = NULL;
- mutex_init(&sdkp->rev_mutex);
- INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
- sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
- if (!sdkp->zone_wp_update_buf)
- return -ENOMEM;
-
- return 0;
-}
-
-void sd_zbc_free_zone_info(struct scsi_disk *sdkp)
-{
- if (!sdkp->zone_wp_update_buf)
- return;
-
- /* Serialize against revalidate zones */
- mutex_lock(&sdkp->rev_mutex);
-
- kvfree(sdkp->zones_wp_offset);
- sdkp->zones_wp_offset = NULL;
- kfree(sdkp->zone_wp_update_buf);
- sdkp->zone_wp_update_buf = NULL;
-
- sdkp->early_zone_info = (struct zoned_disk_info){ };
- sdkp->zone_info = (struct zoned_disk_info){ };
-
- mutex_unlock(&sdkp->rev_mutex);
-}
-
-static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
-{
- struct scsi_disk *sdkp = scsi_disk(disk);
-
- swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
-}
-
/*
* Call blk_revalidate_disk_zones() if any of the zoned disk properties have
* changed that make it necessary to call that function. Called by
@@ -831,18 +547,8 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
struct request_queue *q = disk->queue;
u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
- int ret = 0;
unsigned int flags;
-
- /*
- * For all zoned disks, initialize zone append emulation data if not
- * already done.
- */
- if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
- ret = sd_zbc_init_disk(sdkp);
- if (ret)
- return ret;
- }
+ int ret;
/*
* There is nothing to do for regular disks, including host-aware disks
@@ -851,50 +557,32 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
if (!blk_queue_is_zoned(q))
return 0;
- /*
- * Make sure revalidate zones are serialized to ensure exclusive
- * updates of the scsi disk data.
- */
- mutex_lock(&sdkp->rev_mutex);
-
if (sdkp->zone_info.zone_blocks == zone_blocks &&
sdkp->zone_info.nr_zones == nr_zones &&
disk->nr_zones == nr_zones)
- goto unlock;
+ return 0;
- flags = memalloc_noio_save();
sdkp->zone_info.zone_blocks = zone_blocks;
sdkp->zone_info.nr_zones = nr_zones;
- sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
- if (!sdkp->rev_wp_offset) {
- ret = -ENOMEM;
- memalloc_noio_restore(flags);
- goto unlock;
- }
blk_queue_chunk_sectors(q,
logical_to_sectors(sdkp->device, zone_blocks));
- blk_queue_max_zone_append_sectors(q,
- q->limits.max_segments << PAGE_SECTORS_SHIFT);
- ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);
+ /* Enable block layer zone append emulation */
+ blk_queue_max_zone_append_sectors(q, 0);
+ flags = memalloc_noio_save();
+ ret = blk_revalidate_disk_zones(disk);
memalloc_noio_restore(flags);
- kvfree(sdkp->rev_wp_offset);
- sdkp->rev_wp_offset = NULL;
-
if (ret) {
sdkp->zone_info = (struct zoned_disk_info){ };
sdkp->capacity = 0;
- goto unlock;
+ return ret;
}
sd_zbc_print_zones(sdkp);
-unlock:
- mutex_unlock(&sdkp->rev_mutex);
-
- return ret;
+ return 0;
}
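With the emulation state gone from sd, passing 0 to blk_queue_max_zone_append_sectors() is what opts the disk into the block layer's zone append emulation (per the comment above), so the rev_mutex/rev_wp_offset serialization and the per-disk revalidate callback are no longer needed here.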
/**
@@ -917,10 +605,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
if (!sd_is_zoned(sdkp)) {
/*
* Device managed or normal SCSI disk, no special handling
- * required. Nevertheless, free the disk zone information in
- * case the device type changed.
+ * required.
*/
- sd_zbc_free_zone_info(sdkp);
return 0;
}
@@ -941,7 +627,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
/* The drive satisfies the kernel restrictions: set it up */
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
if (sdkp->zones_max_open == U32_MAX)
disk_set_max_open_zones(disk, 0);
else
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 386981c6976a..baf870a03ecf 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -285,6 +285,7 @@ sg_open(struct inode *inode, struct file *filp)
int dev = iminor(inode);
int flags = filp->f_flags;
struct request_queue *q;
+ struct scsi_device *device;
Sg_device *sdp;
Sg_fd *sfp;
int retval;
@@ -301,11 +302,12 @@ sg_open(struct inode *inode, struct file *filp)
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
- retval = scsi_device_get(sdp->device);
+ device = sdp->device;
+ retval = scsi_device_get(device);
if (retval)
goto sg_put;
- retval = scsi_autopm_get_device(sdp->device);
+ retval = scsi_autopm_get_device(device);
if (retval)
goto sdp_put;
@@ -313,7 +315,7 @@ sg_open(struct inode *inode, struct file *filp)
* check if O_NONBLOCK. Permits SCSI commands to be issued
* during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
- scsi_block_when_processing_errors(sdp->device))) {
+ scsi_block_when_processing_errors(device))) {
retval = -ENXIO;
/* we are in error recovery for this device */
goto error_out;
@@ -344,7 +346,7 @@ sg_open(struct inode *inode, struct file *filp)
if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
- q = sdp->device->request_queue;
+ q = device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
sfp = sg_add_sfp(sdp);
@@ -370,10 +372,11 @@ out_undo:
error_mutex_locked:
mutex_unlock(&sdp->open_rel_lock);
error_out:
- scsi_autopm_put_device(sdp->device);
+ scsi_autopm_put_device(device);
sdp_put:
- scsi_device_put(sdp->device);
- goto sg_put;
+ kref_put(&sdp->d_ref, sg_device_destroy);
+ scsi_device_put(device);
+ return retval;
}
/* Release resources associated with a successful sg_open()
@@ -2233,7 +2236,6 @@ sg_remove_sfp_usercontext(struct work_struct *work)
"sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
- WARN_ON_ONCE(kref_read(&sdp->d_ref) != 1);
kref_put(&sdp->d_ref, sg_device_destroy);
scsi_device_put(device);
module_put(THIS_MODULE);
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index efeba8275a66..a09a26bf4988 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1451,7 +1451,11 @@ static void qcom_slim_ngd_up_worker(struct work_struct *work)
ctrl = container_of(work, struct qcom_slim_ngd_ctrl, ngd_up_work);
/* Make sure qmi service is up before continuing */
- wait_for_completion_interruptible(&ctrl->qmi_up);
+ if (!wait_for_completion_interruptible_timeout(&ctrl->qmi_up,
+ msecs_to_jiffies(MSEC_PER_SEC))) {
+ dev_err(ctrl->dev, "QMI wait timeout\n");
+ return;
+ }
mutex_lock(&ctrl->ssr_lock);
qcom_slim_ngd_enable(ctrl, true);
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index ba8f5b5460e1..fb2bd31387d0 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -7,7 +7,7 @@ obj-y += apple/
obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
-obj-$(CONFIG_SOC_CANAAN) += canaan/
+obj-$(CONFIG_ARCH_CANAAN) += canaan/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
diff --git a/drivers/soc/canaan/Kconfig b/drivers/soc/canaan/Kconfig
index 43ced2bf8444..3121d351fea6 100644
--- a/drivers/soc/canaan/Kconfig
+++ b/drivers/soc/canaan/Kconfig
@@ -2,9 +2,9 @@
config SOC_K210_SYSCTL
bool "Canaan Kendryte K210 SoC system controller"
- depends on RISCV && SOC_CANAAN && OF
+ depends on RISCV && SOC_CANAAN_K210 && OF
depends on COMMON_CLK_K210
- default SOC_CANAAN
+ default SOC_CANAAN_K210
select PM
select MFD_SYSCON
help
diff --git a/drivers/soc/hisilicon/Kconfig b/drivers/soc/hisilicon/Kconfig
index 0ab688af308f..4b0a099b28cc 100644
--- a/drivers/soc/hisilicon/Kconfig
+++ b/drivers/soc/hisilicon/Kconfig
@@ -6,7 +6,7 @@ menu "Hisilicon SoC drivers"
config KUNPENG_HCCS
tristate "HCCS driver on Kunpeng SoC"
depends on ACPI
- depends on MAILBOX
+ depends on PCC
depends on ARM64 || COMPILE_TEST
help
The Huawei Cache Coherence System (HCCS) is a multi-chip
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index 9ff70b38e5e9..e882a61636ec 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -556,6 +556,12 @@ static int hccs_get_all_port_attr(struct hccs_dev *hdev,
start_id = rsp_head.next_id;
}
+ if (left_buf_len != 0) {
+ dev_err(hdev->dev, "failed to get the expected port number(%u) attribute.\n",
+ size);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 50c664b65f4d..1b7afb19ccd6 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -72,6 +72,7 @@ config MTK_SOCINFO
tristate "MediaTek SoC Information"
default y
depends on NVMEM_MTK_EFUSE
+ select SOC_BUS
help
The MediaTek SoC Information (mtk-socinfo) driver provides
information about the SoC to the userspace including the
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index b0cd071c4719..046522664dc1 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -12,9 +12,12 @@
#define CMDQ_WRITE_ENABLE_MASK BIT(0)
#define CMDQ_POLL_ENABLE_MASK BIT(0)
+/* dedicate the last GPR_R15 to hold the register address to be polled */
+#define CMDQ_POLL_ADDR_GPR (15)
#define CMDQ_EOC_IRQ_EN BIT(0)
#define CMDQ_REG_TYPE 1
-#define CMDQ_JUMP_RELATIVE 1
+#define CMDQ_JUMP_RELATIVE 0
+#define CMDQ_JUMP_ABSOLUTE 1
struct cmdq_instruction {
union {
@@ -55,7 +58,7 @@ int cmdq_dev_get_client_reg(struct device *dev,
"mediatek,gce-client-reg",
3, idx, &spec);
if (err < 0) {
- dev_err(dev,
+ dev_warn(dev,
"error %d can't parse gce-client-reg property (%d)",
err, idx);
@@ -105,22 +108,16 @@ void cmdq_mbox_destroy(struct cmdq_client *client)
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
-struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
+int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size)
{
- struct cmdq_pkt *pkt;
struct device *dev;
dma_addr_t dma_addr;
- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return ERR_PTR(-ENOMEM);
pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base) {
- kfree(pkt);
- return ERR_PTR(-ENOMEM);
- }
+ if (!pkt->va_base)
+ return -ENOMEM;
+
pkt->buf_size = size;
- pkt->cl = (void *)client;
dev = client->chan->mbox->dev;
dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
@@ -128,24 +125,20 @@ struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
kfree(pkt->va_base);
- kfree(pkt);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
pkt->pa_base = dma_addr;
- return pkt;
+ return 0;
}
EXPORT_SYMBOL(cmdq_pkt_create);
-void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt)
{
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
-
dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
DMA_TO_DEVICE);
kfree(pkt->va_base);
- kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
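
Editor's note: with these signature changes the packet becomes caller-owned — cmdq_pkt_create() fills in a caller-provided struct cmdq_pkt instead of allocating one, and cmdq_pkt_destroy() takes the client explicitly now that pkt->cl is gone. A minimal caller sketch under those assumptions (error handling abbreviated, names hypothetical):

	struct cmdq_pkt pkt;
	int err;

	err = cmdq_pkt_create(client, &pkt, PAGE_SIZE);
	if (err)
		return err;

	/* ... append commands to &pkt ... */

	cmdq_pkt_destroy(client, &pkt);
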
@@ -299,6 +292,32 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);
+int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr)
+{
+ const u16 high_addr_reg_idx = CMDQ_THR_SPR_IDX0;
+ const u16 value_reg_idx = CMDQ_THR_SPR_IDX1;
+ int ret;
+
+ /* read the value of src_addr into high_addr_reg_idx */
+ ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(src_addr));
+ if (ret < 0)
+ return ret;
+ ret = cmdq_pkt_read_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(src_addr), value_reg_idx);
+ if (ret < 0)
+ return ret;
+
+ /* write the value of value_reg_idx into dst_addr */
+ ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(dst_addr));
+ if (ret < 0)
+ return ret;
+ ret = cmdq_pkt_write_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(dst_addr), value_reg_idx);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_mem_move);
+
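
Editor's note: cmdq_pkt_mem_move() copies one 32-bit value between two DMA addresses inside the packet, staging the high address bits in SPR0 and the value in SPR1. A hedged usage sketch (the addresses are hypothetical):

	/* copy a hardware counter into a result buffer within the same packet */
	err = cmdq_pkt_mem_move(&pkt, counter_dma_addr, result_dma_addr);
	if (err)
		return err;
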
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
struct cmdq_instruction inst = { {0} };
@@ -315,6 +334,21 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
+int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event)
+{
+ struct cmdq_instruction inst = {};
+
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
+
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE | CMDQ_WFE_WAIT;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_acquire_event);
+
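
Editor's note: unlike cmdq_pkt_wfe(), which can clear the event after waiting, cmdq_pkt_acquire_event() programs CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE | CMDQ_WFE_WAIT, so the GCE thread waits until the event is free and then sets it — effectively taking it as a token. A hedged sketch of the pairing (the event ID is hypothetical):

	err = cmdq_pkt_acquire_event(&pkt, my_event);	/* wait until free, then take it */
	/* ... commands that need exclusive access ... */
	err = cmdq_pkt_clear_event(&pkt, my_event);	/* release the token */
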
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
struct cmdq_instruction inst = { {0} };
@@ -380,6 +414,53 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
+int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask)
+{
+ struct cmdq_instruction inst = { {0} };
+ u8 use_mask = 0;
+ int ret;
+
+ /*
+ * Append a MASK instruction to set the mask for the following POLL
+ * instruction, which enables the use_mask bit.
+ */
+ if (mask != GENMASK(31, 0)) {
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ ret = cmdq_pkt_append_command(pkt, inst);
+ if (ret < 0)
+ return ret;
+ use_mask = CMDQ_POLL_ENABLE_MASK;
+ }
+
+ /*
+ * POLL is a legacy operation in GCE and supports neither SPR nor
+ * CMDQ_CODE_LOGIC, so cmdq_pkt_assign cannot be used to store the
+ * polling register address in an SPR. To poll a register address
+ * that has no subsys ID, use a GPR and CMDQ_CODE_MASK to move the
+ * address into the GPR.
+ */
+ inst.op = CMDQ_CODE_MASK;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.sop = CMDQ_POLL_ADDR_GPR;
+ inst.value = addr;
+ ret = cmdq_pkt_append_command(pkt, inst);
+ if (ret < 0)
+ return ret;
+
+ /* Append a POLL instruction to poll the register address assigned to the GPR above. */
+ inst.op = CMDQ_CODE_POLL;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.sop = CMDQ_POLL_ADDR_GPR;
+ inst.offset = use_mask;
+ inst.value = value;
+ ret = cmdq_pkt_append_command(pkt, inst);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll_addr);
+
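
Editor's note: cmdq_pkt_poll_addr() targets addresses that have no subsys ID — it moves the address into GPR_R15 via CMDQ_CODE_MASK and polls through the GPR, with an optional mask. A hedged sketch (address and value are hypothetical):

	/* busy-wait inside the packet until bit 0 of a status register reads 1 */
	err = cmdq_pkt_poll_addr(&pkt, status_dma_addr, 0x1, BIT(0));
	if (err)
		return err;
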
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
struct cmdq_instruction inst = {};
@@ -392,17 +473,36 @@ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
}
EXPORT_SYMBOL(cmdq_pkt_assign);
-int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
{
struct cmdq_instruction inst = {};
inst.op = CMDQ_CODE_JUMP;
- inst.offset = CMDQ_JUMP_RELATIVE;
- inst.value = addr >>
- cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
+ inst.offset = CMDQ_JUMP_ABSOLUTE;
+ inst.value = addr >> shift_pa;
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_jump_abs);
+
+int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa)
+{
+ struct cmdq_instruction inst = { {0} };
+
+ inst.op = CMDQ_CODE_JUMP;
+ inst.value = (u32)offset >> shift_pa;
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_jump_rel);
+
+int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
+{
+ struct cmdq_instruction inst = { {0} };
+
+ inst.op = CMDQ_CODE_EOC;
+ inst.value = CMDQ_EOC_IRQ_EN;
return cmdq_pkt_append_command(pkt, inst);
}
-EXPORT_SYMBOL(cmdq_pkt_jump);
+EXPORT_SYMBOL(cmdq_pkt_eoc);
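
Editor's note: cmdq_pkt_jump() is replaced by explicit cmdq_pkt_jump_abs()/cmdq_pkt_jump_rel() plus a standalone cmdq_pkt_eoc(), so callers can compose the packet tail themselves. A sketch of a hand-rolled ending, assuming shift_pa comes from cmdq_get_shift_pa() and CMDQ_INST_SIZE is the 8-byte GCE instruction size:

	u8 shift_pa = cmdq_get_shift_pa(client->chan);

	err = cmdq_pkt_eoc(&pkt);		/* EOC with IRQ enabled */
	if (err)
		return err;
	/* relative jump past the end of the current instruction */
	err = cmdq_pkt_jump_rel(&pkt, CMDQ_INST_SIZE, shift_pa);
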
int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
@@ -426,19 +526,4 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
}
EXPORT_SYMBOL(cmdq_pkt_finalize);
-int cmdq_pkt_flush_async(struct cmdq_pkt *pkt)
-{
- int err;
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
-
- err = mbox_send_message(client->chan, pkt);
- if (err < 0)
- return err;
- /* We can send next packet immediately, so just call txdone. */
- mbox_client_txdone(client->chan, 0);
-
- return 0;
-}
-EXPORT_SYMBOL(cmdq_pkt_flush_async);
-
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 73c256d3950b..b5af1fb5847e 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -496,6 +496,39 @@ static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_MERGE5] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE4,
};
+static const unsigned int mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+ [MUTEX_MOD_IDX_MDP_RDMA0] = MT8195_MUTEX_MOD_MDP_RDMA0,
+ [MUTEX_MOD_IDX_MDP_RDMA2] = MT8195_MUTEX_MOD_MDP_RDMA2,
+ [MUTEX_MOD_IDX_MDP_RDMA3] = MT8195_MUTEX_MOD_MDP_RDMA3,
+ [MUTEX_MOD_IDX_MDP_FG0] = MT8195_MUTEX_MOD_MDP_FG0,
+ [MUTEX_MOD_IDX_MDP_FG2] = MT8195_MUTEX_MOD_MDP_FG2,
+ [MUTEX_MOD_IDX_MDP_FG3] = MT8195_MUTEX_MOD_MDP_FG3,
+ [MUTEX_MOD_IDX_MDP_HDR0] = MT8195_MUTEX_MOD_MDP_HDR0,
+ [MUTEX_MOD_IDX_MDP_HDR2] = MT8195_MUTEX_MOD_MDP_HDR2,
+ [MUTEX_MOD_IDX_MDP_HDR3] = MT8195_MUTEX_MOD_MDP_HDR3,
+ [MUTEX_MOD_IDX_MDP_AAL0] = MT8195_MUTEX_MOD_MDP_AAL0,
+ [MUTEX_MOD_IDX_MDP_AAL2] = MT8195_MUTEX_MOD_MDP_AAL2,
+ [MUTEX_MOD_IDX_MDP_AAL3] = MT8195_MUTEX_MOD_MDP_AAL3,
+ [MUTEX_MOD_IDX_MDP_RSZ0] = MT8195_MUTEX_MOD_MDP_RSZ0,
+ [MUTEX_MOD_IDX_MDP_RSZ2] = MT8195_MUTEX_MOD_MDP_RSZ2,
+ [MUTEX_MOD_IDX_MDP_RSZ3] = MT8195_MUTEX_MOD_MDP_RSZ3,
+ [MUTEX_MOD_IDX_MDP_MERGE2] = MT8195_MUTEX_MOD_MDP_MERGE2,
+ [MUTEX_MOD_IDX_MDP_MERGE3] = MT8195_MUTEX_MOD_MDP_MERGE3,
+ [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8195_MUTEX_MOD_MDP_TDSHP0,
+ [MUTEX_MOD_IDX_MDP_TDSHP2] = MT8195_MUTEX_MOD_MDP_TDSHP2,
+ [MUTEX_MOD_IDX_MDP_TDSHP3] = MT8195_MUTEX_MOD_MDP_TDSHP3,
+ [MUTEX_MOD_IDX_MDP_COLOR0] = MT8195_MUTEX_MOD_MDP_COLOR0,
+ [MUTEX_MOD_IDX_MDP_COLOR2] = MT8195_MUTEX_MOD_MDP_COLOR2,
+ [MUTEX_MOD_IDX_MDP_COLOR3] = MT8195_MUTEX_MOD_MDP_COLOR3,
+ [MUTEX_MOD_IDX_MDP_OVL0] = MT8195_MUTEX_MOD_MDP_OVL0,
+ [MUTEX_MOD_IDX_MDP_PAD0] = MT8195_MUTEX_MOD_MDP_PAD0,
+ [MUTEX_MOD_IDX_MDP_PAD2] = MT8195_MUTEX_MOD_MDP_PAD2,
+ [MUTEX_MOD_IDX_MDP_PAD3] = MT8195_MUTEX_MOD_MDP_PAD3,
+ [MUTEX_MOD_IDX_MDP_WROT0] = MT8195_MUTEX_MOD_MDP_WROT0,
+ [MUTEX_MOD_IDX_MDP_WROT2] = MT8195_MUTEX_MOD_MDP_WROT2,
+ [MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3,
+};
+
static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
@@ -735,6 +768,13 @@ static const struct mtk_mutex_data mt8188_mutex_driver_data = {
.mutex_sof_reg = MT8183_MUTEX0_SOF0,
};
+static const struct mtk_mutex_data mt8188_vpp_mutex_driver_data = {
+ .mutex_sof = mt8188_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .mutex_table_mod = mt8188_mdp_mutex_table_mod,
+};
+
static const struct mtk_mutex_data mt8192_mutex_driver_data = {
.mutex_mod = mt8192_mutex_mod,
.mutex_sof = mt8183_mutex_sof,
@@ -1089,6 +1129,7 @@ static const struct of_device_id mutex_driver_dt_match[] = {
{ .compatible = "mediatek,mt8186-disp-mutex", .data = &mt8186_mutex_driver_data },
{ .compatible = "mediatek,mt8186-mdp3-mutex", .data = &mt8186_mdp_mutex_driver_data },
{ .compatible = "mediatek,mt8188-disp-mutex", .data = &mt8188_mutex_driver_data },
+ { .compatible = "mediatek,mt8188-vpp-mutex", .data = &mt8188_vpp_mutex_driver_data },
{ .compatible = "mediatek,mt8192-disp-mutex", .data = &mt8192_mutex_driver_data },
{ .compatible = "mediatek,mt8195-disp-mutex", .data = &mt8195_mutex_driver_data },
{ .compatible = "mediatek,mt8195-vpp-mutex", .data = &mt8195_vpp_mutex_driver_data },
diff --git a/drivers/soc/mediatek/mtk-socinfo.c b/drivers/soc/mediatek/mtk-socinfo.c
index 42572e8c1520..74672a9d6d13 100644
--- a/drivers/soc/mediatek/mtk-socinfo.c
+++ b/drivers/soc/mediatek/mtk-socinfo.c
@@ -48,14 +48,15 @@ static struct socinfo_data socinfo_data_table[] = {
MTK_SOCINFO_ENTRY("MT8183", "MT8183V/AZA", "Kompanio 500", 0x00010043, 0x00000940),
MTK_SOCINFO_ENTRY("MT8186", "MT8186GV/AZA", "Kompanio 520", 0x81861001, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8186T", "MT8186TV/AZA", "Kompanio 528", 0x81862001, CELL_NOT_USED),
- MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/AZA", "Kompanio 830", 0x81880000, 0x00000010),
- MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/HZA", "Kompanio 830", 0x81880000, 0x00000011),
+ MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/AZA", "Kompanio 838", 0x81880000, 0x00000010),
+ MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/HZA", "Kompanio 838", 0x81880000, 0x00000011),
MTK_SOCINFO_ENTRY("MT8192", "MT8192V/AZA", "Kompanio 820", 0x00001100, 0x00040080),
MTK_SOCINFO_ENTRY("MT8192T", "MT8192V/ATZA", "Kompanio 828", 0x00000100, 0x000400C0),
MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EZA", "Kompanio 1200", 0x81950300, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EHZA", "Kompanio 1200", 0x81950304, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950100, CELL_NOT_USED),
};
static int mtk_socinfo_create_socinfo_node(struct mtk_socinfo *mtk_socinfop)
@@ -144,7 +145,14 @@ static int mtk_socinfo_get_socinfo_data(struct mtk_socinfo *mtk_socinfop)
}
}
- return match_socinfo_index >= 0 ? match_socinfo_index : -ENOENT;
+ if (match_socinfo_index < 0) {
+ dev_warn(mtk_socinfop->dev,
+ "Unknown MediaTek SoC with ID 0x%08x 0x%08x\n",
+ cell_data[0], cell_data[1]);
+ return -ENOENT;
+ }
+
+ return match_socinfo_index;
}
static int mtk_socinfo_probe(struct platform_device *pdev)
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index c832f5c670bc..9a91298c1253 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -1768,6 +1768,7 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
struct dev_pm_opp *opp;
+ char tz_name_buf[20];
unsigned long freq;
int count, ret;
u32 idx, i;
@@ -1819,10 +1820,12 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
}
if (!IS_ERR_OR_NULL(bdata->tzone_name)) {
- svsb->tzd = thermal_zone_get_zone_by_name(bdata->tzone_name);
+ snprintf(tz_name_buf, ARRAY_SIZE(tz_name_buf),
+ "%s-thermal", bdata->tzone_name);
+ svsb->tzd = thermal_zone_get_zone_by_name(tz_name_buf);
if (IS_ERR(svsb->tzd)) {
dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n",
- bdata->tzone_name);
+ tz_name_buf);
return PTR_ERR(svsb->tzd);
}
}
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index a5fd68411bed..d84572662017 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,6 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -17,6 +21,8 @@
#define MAX_SLV_ID 8
#define SLAVE_ID_MASK 0x7
#define SLAVE_ID_SHIFT 16
+#define SLAVE_ID(addr) FIELD_GET(GENMASK(19, 16), addr)
+#define VRM_ADDR(addr) FIELD_GET(GENMASK(19, 4), addr)
/**
* struct entry_header: header for each entry in cmddb
@@ -147,12 +153,7 @@ static int cmd_db_get_header(const char *id, const struct entry_header **eh,
if (ret)
return ret;
- /*
- * Pad out query string to same length as in DB. NOTE: the output
- * query string is not necessarily '\0' terminated if it bumps up
- * against the max size. That's OK and expected.
- */
- strncpy(query, id, sizeof(query));
+ strtomem_pad(query, id, 0);
for (i = 0; i < MAX_SLV_ID; i++) {
rsc_hdr = &cmd_db_header->header[i];
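
Editor's note: strtomem_pad() replaces strncpy() here because the query buffer is a fixed-size field that need not be NUL-terminated — it copies at most sizeof(dest) bytes from the string and fills the remainder with the pad byte. A minimal sketch of the semantics (buffer size and IDs hypothetical):

	char query[8];

	strtomem_pad(query, "mx.lvl0", 0);	/* copies 7 chars, pads last byte with 0 */
	strtomem_pad(query, "longer-than-8-bytes", 0);	/* truncates at 8, no NUL appended */
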
@@ -221,6 +222,30 @@ const void *cmd_db_read_aux_data(const char *id, size_t *len)
EXPORT_SYMBOL_GPL(cmd_db_read_aux_data);
/**
+ * cmd_db_match_resource_addr() - Check whether two resource addresses match
+ *
+ * @addr1: Resource address to compare
+ * @addr2: Resource address to compare
+ *
+ * Return: true if two addresses refer to the same resource, false otherwise
+ */
+bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
+{
+ /*
+ * Each RPMh VRM accelerator resource has 3 or 4 contiguous 4-byte
+ * aligned addresses associated with it. Ignore the offset to check
+ * for VRM requests.
+ */
+ if (addr1 == addr2)
+ return true;
+ else if (SLAVE_ID(addr1) == CMD_DB_HW_VRM && VRM_ADDR(addr1) == VRM_ADDR(addr2))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(cmd_db_match_resource_addr);
+
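
Editor's note: the helper treats VRM resources specially — SLAVE_ID() extracts bits 19:16 and VRM_ADDR() bits 19:4, so two VRM addresses that differ only in the low 4 offset bits compare equal. A worked example, assuming CMD_DB_HW_VRM is slave ID 4 (addresses hypothetical):

	/*
	 * For a VRM resource at base 0x40010 (SLAVE_ID() == 4):
	 *   cmd_db_match_resource_addr(0x40010, 0x40014) -> true  (same VRM, offsets 0x0/0x4)
	 *   cmd_db_match_resource_addr(0x40010, 0x40020) -> false (different VRM resource)
	 */
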
+/**
* cmd_db_read_slave_id - Get the slave ID for a given resource address
*
* @id: Resource id to query the DB for version
@@ -362,7 +387,7 @@ static int __init cmd_db_device_init(void)
{
return platform_driver_register(&cmd_db_dev_driver);
}
-arch_initcall(cmd_db_device_init);
+core_initcall(cmd_db_device_init);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index 656706259353..fb323b3364db 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -282,7 +282,7 @@ static const struct regmap_config msm8998_bwmon_regmap_cfg = {
* Cache is necessary for using regmap fields with non-readable
* registers.
*/
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config msm8998_bwmon_global_regmap_cfg = {
@@ -301,7 +301,7 @@ static const struct regmap_config msm8998_bwmon_global_regmap_cfg = {
* Cache is necessary for using regmap fields with non-readable
* registers.
*/
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct reg_field sdm845_cpu_bwmon_reg_fields[] = {
@@ -369,7 +369,7 @@ static const struct regmap_config sdm845_cpu_bwmon_regmap_cfg = {
* Cache is necessary for using regmap fields with non-readable
* registers.
*/
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/* BWMON v5 */
@@ -446,7 +446,7 @@ static const struct regmap_config sdm845_llcc_bwmon_regmap_cfg = {
* Cache is necessary for using regmap fields with non-readable
* registers.
*/
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static void bwmon_clear_counters(struct icc_bwmon *bwmon, bool clear_all)
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index f913e9bd57ed..823fd108fa03 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/soc/qcom/pdr.h>
#include <linux/soc/qcom/pmic_glink.h>
+#include <linux/spinlock.h>
enum {
PMIC_GLINK_CLIENT_BATT = 0,
@@ -36,7 +37,7 @@ struct pmic_glink {
unsigned int pdr_state;
/* serializing clients list updates */
- struct mutex client_lock;
+ spinlock_t client_lock;
struct list_head clients;
};
@@ -58,10 +59,11 @@ static void _devm_pmic_glink_release_client(struct device *dev, void *res)
{
struct pmic_glink_client *client = (struct pmic_glink_client *)res;
struct pmic_glink *pg = client->pg;
+ unsigned long flags;
- mutex_lock(&pg->client_lock);
+ spin_lock_irqsave(&pg->client_lock, flags);
list_del(&client->node);
- mutex_unlock(&pg->client_lock);
+ spin_unlock_irqrestore(&pg->client_lock, flags);
}
struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
@@ -72,6 +74,7 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
{
struct pmic_glink_client *client;
struct pmic_glink *pg = dev_get_drvdata(dev->parent);
+ unsigned long flags;
client = devres_alloc(_devm_pmic_glink_release_client, sizeof(*client), GFP_KERNEL);
if (!client)
@@ -83,9 +86,14 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
client->pdr_notify = pdr;
client->priv = priv;
- mutex_lock(&pg->client_lock);
+ mutex_lock(&pg->state_lock);
+ spin_lock_irqsave(&pg->client_lock, flags);
+
list_add(&client->node, &pg->clients);
- mutex_unlock(&pg->client_lock);
+ client->pdr_notify(client->priv, pg->client_state);
+
+ spin_unlock_irqrestore(&pg->client_lock, flags);
+ mutex_unlock(&pg->state_lock);
devres_add(dev, client);
@@ -107,6 +115,7 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
struct pmic_glink_client *client;
struct pmic_glink_hdr *hdr;
struct pmic_glink *pg = dev_get_drvdata(&rpdev->dev);
+ unsigned long flags;
if (len < sizeof(*hdr)) {
dev_warn(pg->dev, "ignoring truncated message\n");
@@ -115,10 +124,12 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
hdr = data;
+ spin_lock_irqsave(&pg->client_lock, flags);
list_for_each_entry(client, &pg->clients, node) {
if (client->id == le32_to_cpu(hdr->owner))
client->cb(data, len, client->priv);
}
+ spin_unlock_irqrestore(&pg->client_lock, flags);
return 0;
}
@@ -158,6 +169,7 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
{
struct pmic_glink_client *client;
unsigned int new_state = pg->client_state;
+ unsigned long flags;
if (pg->client_state != SERVREG_SERVICE_STATE_UP) {
if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
@@ -168,8 +180,10 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
}
if (new_state != pg->client_state) {
+ spin_lock_irqsave(&pg->client_lock, flags);
list_for_each_entry(client, &pg->clients, node)
client->pdr_notify(client->priv, new_state);
+ spin_unlock_irqrestore(&pg->client_lock, flags);
pg->client_state = new_state;
}
}
@@ -256,7 +270,7 @@ static int pmic_glink_probe(struct platform_device *pdev)
pg->dev = &pdev->dev;
INIT_LIST_HEAD(&pg->clients);
- mutex_init(&pg->client_lock);
+ spin_lock_init(&pg->client_lock);
mutex_init(&pg->state_lock);
match_data = (unsigned long *)of_device_get_match_data(&pdev->dev);
diff --git a/drivers/soc/qcom/pmic_pdcharger_ulog.c b/drivers/soc/qcom/pmic_pdcharger_ulog.c
index 238cd38589dc..39f412bbf2c1 100644
--- a/drivers/soc/qcom/pmic_pdcharger_ulog.c
+++ b/drivers/soc/qcom/pmic_pdcharger_ulog.c
@@ -150,6 +150,10 @@ static const struct rpmsg_device_id pmic_pdcharger_ulog_rpmsg_id_match[] = {
{ "PMIC_LOGS_ADSP_APPS" },
{}
};
+/*
+ * No MODULE_DEVICE_TABLE intentionally: this is a debugging module, meant
+ * to be loaded manually only.
+ */
static struct rpmsg_driver pmic_pdcharger_ulog_rpmsg_driver = {
.probe = pmic_pdcharger_ulog_rpmsg_probe,
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
index 0216fc24f2ca..c429d5154aae 100644
--- a/drivers/soc/qcom/qcom_stats.c
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -35,11 +35,15 @@ static const struct subsystem_data subsystems[] = {
{ "wpss", 605, 13 },
{ "adsp", 606, 2 },
{ "cdsp", 607, 5 },
+ { "cdsp1", 607, 12 },
+ { "gpdsp0", 607, 17 },
+ { "gpdsp1", 607, 18 },
{ "slpi", 608, 3 },
{ "gpu", 609, 0 },
{ "display", 610, 0 },
{ "adsp_island", 613, 2 },
{ "slpi_island", 613, 3 },
+ { "apss", 631, QCOM_SMEM_HOST_ANY },
};
struct stats_config {
diff --git a/drivers/soc/qcom/rpm_master_stats.c b/drivers/soc/qcom/rpm_master_stats.c
index 9ca13bcf67d3..086fe4ba6707 100644
--- a/drivers/soc/qcom/rpm_master_stats.c
+++ b/drivers/soc/qcom/rpm_master_stats.c
@@ -148,6 +148,10 @@ static const struct of_device_id rpm_master_table[] = {
{ .compatible = "qcom,rpm-master-stats" },
{ },
};
+/*
+ * No MODULE_DEVICE_TABLE intentionally: this is a debugging module, meant
+ * to be loaded manually only.
+ */
static struct platform_driver master_stats_driver = {
.probe = master_stats_probe,
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index a021dc71807b..561d8037b50a 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
@@ -557,7 +558,7 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
for (k = 0; k < msg->num_cmds; k++) {
- if (addr == msg->cmds[k].addr)
+ if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
return -EBUSY;
}
}
@@ -1154,7 +1155,7 @@ static int __init rpmh_driver_init(void)
{
return platform_driver_register(&rpmh_driver);
}
-arch_initcall(rpmh_driver_init);
+core_initcall(rpmh_driver_init);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index e8ff9819ac47..277c07a6603d 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -133,6 +133,7 @@ static const char *const pmic_models[] = {
[72] = "PMR735D",
[73] = "PM8550",
[74] = "PMK8550",
+ [82] = "SMB2360",
};
struct socinfo_params {
@@ -430,6 +431,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(QRU1000) },
{ qcom_board_id(SM8475_2) },
{ qcom_board_id(QDU1000) },
+ { qcom_board_id(X1E80100) },
{ qcom_board_id(SM8650) },
{ qcom_board_id(SM4450) },
{ qcom_board_id(QDU1010) },
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 5deca747fb77..5d94c3f31494 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -24,6 +24,7 @@ config ARCH_RCAR_GEN2
select RENESAS_IRQC
select RST_RCAR
select SYS_SUPPORTS_SH_CMT
+ select SYS_SUPPORTS_SH_TMU
config ARCH_RCAR_GEN3
bool
@@ -344,6 +345,11 @@ config ARCH_R9A09G011
help
This enables support for the Renesas RZ/V2M SoC.
+config ARCH_R9A09G057
+ bool "ARM64 Platform support for RZ/V2H(P)"
+ help
+ This enables support for the Renesas RZ/V2H(P) SoC variants.
+
endif # ARM64
if RISCV
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 8f9b8d3736dc..172d59e6fbcf 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -75,6 +75,10 @@ static const struct renesas_family fam_rzg3s __initconst __maybe_unused = {
.name = "RZ/G3S",
};
+static const struct renesas_family fam_rzv2h __initconst __maybe_unused = {
+ .name = "RZ/V2H",
+};
+
static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
.name = "RZ/V2L",
};
@@ -177,6 +181,11 @@ static const struct renesas_soc soc_rz_g3s __initconst __maybe_unused = {
.id = 0x85e0447,
};
+static const struct renesas_soc soc_rz_v2h __initconst __maybe_unused = {
+ .family = &fam_rzv2h,
+ .id = 0x847a447,
+};
+
static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
.family = &fam_rzv2l,
.id = 0x8447447,
@@ -407,6 +416,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R9A09G011
{ .compatible = "renesas,r9a09g011", .data = &soc_rz_v2m },
#endif
+#ifdef CONFIG_ARCH_R9A09G057
+ { .compatible = "renesas,r9a09g057", .data = &soc_rz_v2h },
+#endif
#ifdef CONFIG_ARCH_SH73A0
{ .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
#endif
@@ -432,6 +444,11 @@ static const struct renesas_id id_rzg2l __initconst = {
.mask = 0xfffffff,
};
+static const struct renesas_id id_rzv2h __initconst = {
+ .offset = 0x304,
+ .mask = 0xfffffff,
+};
+
static const struct renesas_id id_rzv2m __initconst = {
.offset = 0x104,
.mask = 0xff,
@@ -449,6 +466,7 @@ static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a08g045-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a09g011-sys", .data = &id_rzv2m },
+ { .compatible = "renesas,r9a09g057-sys", .data = &id_rzv2h },
{ .compatible = "renesas,prr", .data = &id_prr },
{ /* sentinel */ }
};
@@ -513,7 +531,7 @@ static int __init renesas_soc_init(void)
eslo = product & 0xf;
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
eshi, eslo);
- } else if (id == &id_rzg2l) {
+ } else if (id == &id_rzg2l || id == &id_rzv2h) {
eshi = ((product >> 28) & 0x0f);
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u",
eshi);
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
index d60af8acc391..97006cc3b946 100644
--- a/drivers/soc/samsung/exynos-asv.c
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -11,6 +11,7 @@
#include <linux/cpu.h>
#include <linux/device.h>
+#include <linux/energy_model.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
@@ -97,9 +98,16 @@ static int exynos_asv_update_opps(struct exynos_asv *asv)
last_opp_table = opp_table;
ret = exynos_asv_update_cpu_opps(asv, cpu);
- if (ret < 0)
+ if (!ret) {
+ /*
+ * Update EM power values since OPP
+ * voltage values may have changed.
+ */
+ em_dev_update_chip_binning(cpu);
+ } else {
dev_err(asv->dev, "Couldn't update OPPs for cpu%d\n",
cpuid);
+ }
}
dev_pm_opp_put_opp_table(opp_table);
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index d6bfcea5ee65..91d0ad6ddefc 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -4074,6 +4074,7 @@ static const char * const tegra194_reset_sources[] = {
};
static const struct tegra_wake_event tegra194_wake_events[] = {
+ TEGRA_WAKE_GPIO("eqos", 20, 0, TEGRA194_MAIN_GPIO(G, 4)),
TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
@@ -4210,6 +4211,7 @@ static const char * const tegra234_reset_sources[] = {
static const struct tegra_wake_event tegra234_wake_events[] = {
TEGRA_WAKE_GPIO("sd-wake", 8, 0, TEGRA234_MAIN_GPIO(G, 7)),
+ TEGRA_WAKE_GPIO("eqos", 20, 0, TEGRA234_MAIN_GPIO(G, 4)),
TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)),
TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)),
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 6a1c6b34c414..88f774db9208 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -16,7 +16,6 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/suspend.h>
@@ -314,7 +313,6 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
{
struct device *dev = m3_ipc->dev;
- mbox_msg_t dummy_msg = 0;
int ret;
if (!m3_ipc->mbox) {
@@ -330,7 +328,7 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
* the RX callback to avoid multiple interrupts being received
* by the CM3.
*/
- ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ ret = mbox_send_message(m3_ipc->mbox, NULL);
if (ret < 0) {
dev_err(dev, "%s: mbox_send_message() failed: %d\n",
__func__, ret);
@@ -352,7 +350,6 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
{
struct device *dev = m3_ipc->dev;
- mbox_msg_t dummy_msg = 0;
int ret;
if (!m3_ipc->mbox) {
@@ -361,7 +358,7 @@ static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
return -EIO;
}
- ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ ret = mbox_send_message(m3_ipc->mbox, NULL);
if (ret < 0) {
dev_err(dev, "%s: mbox_send_message() failed: %d\n",
__func__, ret);
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 7cd24bd8e224..6bcf8e75273c 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -130,6 +130,19 @@ static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
}
+static void amd_sdw_wake_enable(struct amd_sdw_manager *amd_manager, bool enable)
+{
+ u32 wake_ctrl;
+
+ wake_ctrl = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+ if (enable)
+ wake_ctrl |= AMD_SDW_WAKE_INTR_MASK;
+ else
+ wake_ctrl &= ~AMD_SDW_WAKE_INTR_MASK;
+
+ writel(wake_ctrl, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+}
+
static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
int cmd_offset)
{
@@ -1095,6 +1108,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, false);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
/*
@@ -1121,6 +1135,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
return 0;
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, true);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
ret = amd_sdw_clock_stop(amd_manager);
diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
index 418b679e0b1a..707065468e05 100644
--- a/drivers/soundwire/amd_manager.h
+++ b/drivers/soundwire/amd_manager.h
@@ -152,7 +152,7 @@
#define AMD_SDW0_EXT_INTR_MASK 0x200000
#define AMD_SDW1_EXT_INTR_MASK 4
#define AMD_SDW_IRQ_MASK_0TO7 0x77777777
-#define AMD_SDW_IRQ_MASK_8TO11 0x000d7777
+#define AMD_SDW_IRQ_MASK_8TO11 0x000c7777
#define AMD_SDW_IRQ_ERROR_MASK 0xff
#define AMD_SDW_MAX_FREQ_NUM 1
#define AMD_SDW0_MAX_TX_PORTS 3
@@ -190,6 +190,7 @@
#define AMD_SDW_CLK_RESUME_REQ 2
#define AMD_SDW_CLK_RESUME_DONE 3
#define AMD_SDW_WAKE_STAT_MASK BIT(16)
+#define AMD_SDW_WAKE_INTR_MASK BIT(16)
static u32 amd_sdw_freq_tbl[AMD_SDW_MAX_FREQ_NUM] = {
AMD_SDW_DEFAULT_CLK_FREQ,
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 7cc219d78551..e358ac5b4509 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -623,7 +623,7 @@ static int spi_engine_probe(struct platform_device *pdev)
version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
- dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
+ dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
ADI_AXI_PCORE_VER_MAJOR(version),
ADI_AXI_PCORE_VER_MINOR(version),
ADI_AXI_PCORE_VER_PATCH(version));
diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
index 35ef5e8e2ffd..77e9738e42f6 100644
--- a/drivers/spi/spi-hisi-kunpeng.c
+++ b/drivers/spi/spi-hisi-kunpeng.c
@@ -151,8 +151,6 @@ static const struct debugfs_reg32 hisi_spi_regs[] = {
HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
- HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
- HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
index 03d125a71fd9..09f16471c537 100644
--- a/drivers/spi/spi-microchip-core-qspi.c
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -283,6 +283,7 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi
}
control = readl_relaxed(qspi->regs + REG_CONTROL);
+ control &= ~CONTROL_CLKRATE_MASK;
control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
writel_relaxed(control, qspi->regs + REG_CONTROL);
control = readl_relaxed(qspi->regs + REG_CONTROL);
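
Editor's note: the fix above is a standard read-modify-write — without clearing CONTROL_CLKRATE_MASK first, a previously programmed divider would be OR-ed together with the new one. The generic pattern, as a sketch (register and field names hypothetical):

	u32 reg;

	reg = readl_relaxed(base + REG_CTRL);
	reg &= ~FIELD_MASK;			/* clear the old field value first */
	reg |= new_val << FIELD_SHIFT;		/* then OR in the new value */
	writel_relaxed(reg, base + REG_CTRL);
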
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index e4e7ddb7524a..4a68abcdcc35 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1016,10 +1016,8 @@ end_irq:
static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
- struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi_finalize_current_transfer(ctrl);
- stm32fx_spi_disable(spi);
return IRQ_HANDLED;
}
@@ -1187,6 +1185,8 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
~clrb) | setb,
spi->base + spi->cfg->regs->cpol.reg);
+ stm32_spi_enable(spi);
+
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
@@ -1204,7 +1204,6 @@ static void stm32fx_spi_dma_tx_cb(void *data)
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
spi_finalize_current_transfer(spi->ctrl);
- stm32fx_spi_disable(spi);
}
}
@@ -1219,7 +1218,6 @@ static void stm32_spi_dma_rx_cb(void *data)
struct stm32_spi *spi = data;
spi_finalize_current_transfer(spi->ctrl);
- spi->cfg->disable(spi);
}
/**
@@ -1307,8 +1305,6 @@ static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2);
- stm32_spi_enable(spi);
-
/* starting data transfer when buffer is loaded */
if (spi->tx_buf)
spi->cfg->write_tx(spi);
@@ -1345,8 +1341,6 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
spin_lock_irqsave(&spi->lock, flags);
- stm32_spi_enable(spi);
-
/* Be sure to have data in fifo before starting data transfer */
if (spi->tx_buf)
stm32h7_spi_write_txfifo(spi);
@@ -1378,8 +1372,6 @@ static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi)
*/
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE);
}
-
- stm32_spi_enable(spi);
}
/**
@@ -1413,8 +1405,6 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
- stm32_spi_enable(spi);
-
if (STM32_SPI_HOST_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ff75838c1b5d..a2c467d9e92f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -4523,6 +4523,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
wait_for_completion(&done);
status = message->status;
}
+ message->complete = NULL;
message->context = NULL;
return status;
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c1fbcdd16182..c40217f44b1b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -3672,6 +3672,8 @@ static int __init target_core_init_configfs(void)
{
struct configfs_subsystem *subsys = &target_core_fabrics;
struct t10_alua_lu_gp *lu_gp;
+ struct cred *kern_cred;
+ const struct cred *old_cred;
int ret;
pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
@@ -3748,11 +3750,21 @@ static int __init target_core_init_configfs(void)
if (ret < 0)
goto out;
+ /* We use the kernel credentials to access the target directory */
+ kern_cred = prepare_kernel_cred(&init_task);
+ if (!kern_cred) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ old_cred = override_creds(kern_cred);
target_init_dbroot();
+ revert_creds(old_cred);
+ put_cred(kern_cred);
return 0;
out:
+ target_xcopy_release_pt();
configfs_unregister_subsystem(subsys);
core_dev_release_virtual_lun0();
rd_module_exit();
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 73a147202e88..61b507c18780 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -15,5 +15,6 @@ if TEE
source "drivers/tee/optee/Kconfig"
source "drivers/tee/amdtee/Kconfig"
+source "drivers/tee/tstee/Kconfig"
endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
index 68da044afbfa..5488cba30bd2 100644
--- a/drivers/tee/Makefile
+++ b/drivers/tee/Makefile
@@ -5,3 +5,4 @@ tee-objs += tee_shm.o
tee-objs += tee_shm_pool.o
obj-$(CONFIG_OPTEE) += optee/
obj-$(CONFIG_AMDTEE) += amdtee/
+obj-$(CONFIG_ARM_TSTEE) += tstee/
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
index 6d0f7062bb87..d87050033894 100644
--- a/drivers/tee/amdtee/amdtee_private.h
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -9,7 +9,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/kref.h>
#include <linux/types.h>
#include "amdtee_if.h"
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
index e9b63dcb3194..4c21b02be4af 100644
--- a/drivers/tee/amdtee/call.c
+++ b/drivers/tee/amdtee/call.c
@@ -5,7 +5,7 @@
#include <linux/device.h>
#include <linux/tee.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/psp-tee.h>
#include <linux/slab.h>
#include <linux/psp.h>
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 3c15f6a9e91c..e487231d25dc 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -9,13 +9,12 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/device.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include "amdtee_private.h"
-#include "../tee_private.h"
#include <linux/psp-tee.h>
static struct amdtee_driver_data *drv_data;
diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
index f0303126f199..6346e0bc8a64 100644
--- a/drivers/tee/amdtee/shm_pool.c
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -4,7 +4,7 @@
*/
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/psp.h>
#include "amdtee_private.h"
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index a91e50be11be..16eb953e14bb 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -7,7 +7,7 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 3aed554bc8d8..39e688d4e974 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -9,77 +9,13 @@
#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"
-int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
- size_t size, size_t align,
- int (*shm_register)(struct tee_context *ctx,
- struct tee_shm *shm,
- struct page **pages,
- size_t num_pages,
- unsigned long start))
-{
- size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
- struct page **pages;
- unsigned int i;
- int rc = 0;
-
- /*
- * Ignore alignment since this is already going to be page aligned
- * and there's no need for any larger alignment.
- */
- shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
- GFP_KERNEL | __GFP_ZERO);
- if (!shm->kaddr)
- return -ENOMEM;
-
- shm->paddr = virt_to_phys(shm->kaddr);
- shm->size = nr_pages * PAGE_SIZE;
-
- pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- rc = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < nr_pages; i++)
- pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
-
- shm->pages = pages;
- shm->num_pages = nr_pages;
-
- if (shm_register) {
- rc = shm_register(shm->ctx, shm, pages, nr_pages,
- (unsigned long)shm->kaddr);
- if (rc)
- goto err;
- }
-
- return 0;
-err:
- free_pages_exact(shm->kaddr, shm->size);
- shm->kaddr = NULL;
- return rc;
-}
-
-void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
- int (*shm_unregister)(struct tee_context *ctx,
- struct tee_shm *shm))
-{
- if (shm_unregister)
- shm_unregister(shm->ctx, shm);
- free_pages_exact(shm->kaddr, shm->size);
- shm->kaddr = NULL;
- kfree(shm->pages);
- shm->pages = NULL;
-}
-
static void optee_bus_scan(struct work_struct *work)
{
WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 1892e49a8e6a..d296c70ddfdc 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/uuid.h>
#include "optee_private.h"
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index ecb5eb079408..3235e1c719e8 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -11,7 +11,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_private.h"
#include "optee_ffa.h"
@@ -374,14 +374,14 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
struct tee_shm *shm, size_t size, size_t align)
{
- return optee_pool_op_alloc_helper(pool, shm, size, align,
- optee_ffa_shm_register);
+ return tee_dyn_shm_alloc_helper(shm, size, align,
+ optee_ffa_shm_register);
}
static void pool_ffa_op_free(struct tee_shm_pool *pool,
struct tee_shm *shm)
{
- optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister);
+ tee_dyn_shm_free_helper(shm, optee_ffa_shm_unregister);
}
static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
index 05212842b0a5..0d7878e770cd 100644
--- a/drivers/tee/optee/notif.c
+++ b/drivers/tee/optee/notif.c
@@ -9,7 +9,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include "optee_private.h"
struct notif_entry {
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 7a5243c78b55..429cc20be5cc 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -9,7 +9,7 @@
#include <linux/arm-smccc.h>
#include <linux/rhashtable.h>
#include <linux/semaphore.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include "optee_msg.h"
@@ -283,18 +283,6 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
int optee_enumerate_devices(u32 func);
void optee_unregister_devices(void);
-int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
- size_t size, size_t align,
- int (*shm_register)(struct tee_context *ctx,
- struct tee_shm *shm,
- struct page **pages,
- size_t num_pages,
- unsigned long start));
-void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
- int (*shm_unregister)(struct tee_context *ctx,
- struct tee_shm *shm));
-
-
void optee_remove_common(struct optee *optee);
int optee_open(struct tee_context *ctx, bool cap_memref_null);
void optee_release(struct tee_context *ctx);
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index e69bc6380683..f086812f1179 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -8,7 +8,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include "optee_private.h"
#include "optee_rpc_cmd.h"
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index a37f87087e5c..844285d4f03c 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -23,7 +23,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"
@@ -592,19 +592,18 @@ static int pool_op_alloc(struct tee_shm_pool *pool,
* to be registered with OP-TEE.
*/
if (shm->flags & TEE_SHM_PRIV)
- return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);
+ return tee_dyn_shm_alloc_helper(shm, size, align, NULL);
- return optee_pool_op_alloc_helper(pool, shm, size, align,
- optee_shm_register);
+ return tee_dyn_shm_alloc_helper(shm, size, align, optee_shm_register);
}
static void pool_op_free(struct tee_shm_pool *pool,
struct tee_shm *shm)
{
if (!(shm->flags & TEE_SHM_PRIV))
- optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
+ tee_dyn_shm_free_helper(shm, optee_shm_unregister);
else
- optee_pool_op_free_helper(pool, shm, NULL);
+ tee_dyn_shm_free_helper(shm, NULL);
}
static void pool_op_destroy_pool(struct tee_shm_pool *pool)
@@ -1433,7 +1432,7 @@ static optee_invoke_fn *get_invoke_func(struct device *dev)
* optee_remove is called by platform subsystem to alert the driver
* that it should release the device
*/
-static int optee_smc_remove(struct platform_device *pdev)
+static void optee_smc_remove(struct platform_device *pdev)
{
struct optee *optee = platform_get_drvdata(pdev);
@@ -1453,8 +1452,6 @@ static int optee_smc_remove(struct platform_device *pdev)
memunmap(optee->smc.memremaped_shm);
kfree(optee);
-
- return 0;
}
/* optee_shutdown - Device Removal Routine
@@ -1806,7 +1803,7 @@ MODULE_DEVICE_TABLE(of, optee_dt_match);
static struct platform_driver optee_driver = {
.probe = optee_probe,
- .remove = optee_smc_remove,
+ .remove_new = optee_smc_remove,
.shutdown = optee_shutdown,
.driver = {
.name = "optee",
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index e59c20d74b36..82ad095d2b1c 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -11,7 +11,7 @@
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index 754e11dcb240..9bc50605227c 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -12,41 +12,6 @@
#include <linux/mutex.h>
#include <linux/types.h>
-#define TEE_DEVICE_FLAG_REGISTERED 0x1
-#define TEE_MAX_DEV_NAME_LEN 32
-
-/**
- * struct tee_device - TEE Device representation
- * @name: name of device
- * @desc: description of device
- * @id: unique id of device
- * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above
- * @dev: embedded basic device structure
- * @cdev: embedded cdev
- * @num_users: number of active users of this device
- * @c_no_user: completion used when unregistering the device
- * @mutex: mutex protecting @num_users and @idr
- * @idr: register of user space shared memory objects allocated or
- * registered on this device
- * @pool: shared memory pool
- */
-struct tee_device {
- char name[TEE_MAX_DEV_NAME_LEN];
- const struct tee_desc *desc;
- int id;
- unsigned int flags;
-
- struct device dev;
- struct cdev cdev;
-
- size_t num_users;
- struct completion c_no_users;
- struct mutex mutex; /* protects num_users and idr */
-
- struct idr idr;
- struct tee_shm_pool *pool;
-};
-
int tee_shm_get_fd(struct tee_shm *shm);
bool tee_device_get(struct tee_device *teedev);
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 731d9028b67f..daf6e5cfd59a 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -5,10 +5,11 @@
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/idr.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/highmem.h>
@@ -202,6 +203,70 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
+int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start))
+{
+ size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+ struct page **pages;
+ unsigned int i;
+ int rc = 0;
+
+ /*
+ * Ignore alignment since this is already going to be page aligned
+ * and there's no need for any larger alignment.
+ */
+ shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!shm->kaddr)
+ return -ENOMEM;
+
+ shm->paddr = virt_to_phys(shm->kaddr);
+ shm->size = nr_pages * PAGE_SIZE;
+
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
+
+ shm->pages = pages;
+ shm->num_pages = nr_pages;
+
+ if (shm_register) {
+ rc = shm_register(shm->ctx, shm, pages, nr_pages,
+ (unsigned long)shm->kaddr);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+err:
+ free_pages_exact(shm->kaddr, shm->size);
+ shm->kaddr = NULL;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper);
+
+void tee_dyn_shm_free_helper(struct tee_shm *shm,
+ int (*shm_unregister)(struct tee_context *ctx,
+ struct tee_shm *shm))
+{
+ if (shm_unregister)
+ shm_unregister(shm->ctx, shm);
+ free_pages_exact(shm->kaddr, shm->size);
+ shm->kaddr = NULL;
+ kfree(shm->pages);
+ shm->pages = NULL;
+}
+EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper);
+
static struct tee_shm *
register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
int id)
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
index 058bfbac657a..80004b55628d 100644
--- a/drivers/tee/tee_shm_pool.c
+++ b/drivers/tee/tee_shm_pool.c
@@ -6,7 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
-#include <linux/tee_drv.h>
+#include <linux/tee_core.h>
#include "tee_private.h"
static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
diff --git a/drivers/tee/tstee/Kconfig b/drivers/tee/tstee/Kconfig
new file mode 100644
index 000000000000..d32f91d47398
--- /dev/null
+++ b/drivers/tee/tstee/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM_TSTEE
+ tristate "Arm Trusted Services TEE driver"
+ depends on ARM_FFA_TRANSPORT
+ default n
+ help
+ The Trusted Services project provides a framework for developing and
+ deploying device Root of Trust services in FF-A Secure Partitions.
+ This driver provides an interface to make Trusted Services Secure
+ Partitions accessible to user space clients, since the FF-A driver
+ doesn't implement a user space interface directly.
diff --git a/drivers/tee/tstee/Makefile b/drivers/tee/tstee/Makefile
new file mode 100644
index 000000000000..5227020ebd30
--- /dev/null
+++ b/drivers/tee/tstee/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+arm-tstee-objs := core.o
+obj-$(CONFIG_ARM_TSTEE) = arm-tstee.o
diff --git a/drivers/tee/tstee/core.c b/drivers/tee/tstee/core.c
new file mode 100644
index 000000000000..533425e9e9e7
--- /dev/null
+++ b/drivers/tee/tstee/core.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Arm Limited
+ */
+
+#include <linux/arm_ffa.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/tee_core.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
+#include "tstee_private.h"
+
+#define FFA_DIRECT_REQ_ARG_NUM 5
+#define FFA_INVALID_MEM_HANDLE U64_MAX
+
+static void arg_list_to_ffa_data(const u32 *args,
+ struct ffa_send_direct_data *data)
+{
+ data->data0 = args[0];
+ data->data1 = args[1];
+ data->data2 = args[2];
+ data->data3 = args[3];
+ data->data4 = args[4];
+}
+
+static void arg_list_from_ffa_data(const struct ffa_send_direct_data *data,
+ u32 *args)
+{
+ args[0] = lower_32_bits(data->data0);
+ args[1] = lower_32_bits(data->data1);
+ args[2] = lower_32_bits(data->data2);
+ args[3] = lower_32_bits(data->data3);
+ args[4] = lower_32_bits(data->data4);
+}
+
+static void tstee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tstee *tstee = tee_get_drvdata(teedev);
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_TSTEE,
+ /* FF-A endpoint ID only uses the lower 16 bits */
+ .impl_caps = lower_16_bits(tstee->ffa_dev->vm_id),
+ .gen_caps = 0,
+ };
+
+ *vers = v;
+}
+
+static int tstee_open(struct tee_context *ctx)
+{
+ struct ts_context_data *ctxdata;
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ xa_init_flags(&ctxdata->sess_list, XA_FLAGS_ALLOC);
+
+ ctx->data = ctxdata;
+
+ return 0;
+}
+
+static void tstee_release(struct tee_context *ctx)
+{
+ struct ts_context_data *ctxdata = ctx->data;
+ struct ts_session *sess;
+ unsigned long idx;
+
+ if (!ctxdata)
+ return;
+
+ xa_for_each(&ctxdata->sess_list, idx, sess) {
+ xa_erase(&ctxdata->sess_list, idx);
+ kfree(sess);
+ }
+
+ xa_destroy(&ctxdata->sess_list);
+
+ kfree(ctxdata);
+ ctx->data = NULL;
+}
+
+static int tstee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param __always_unused)
+{
+ struct tstee *tstee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = tstee->ffa_dev;
+ struct ts_context_data *ctxdata = ctx->data;
+ struct ffa_send_direct_data ffa_data;
+ struct ts_session *sess = NULL;
+ u32 ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+ u32 sess_id;
+ int rc;
+
+ ffa_args[TS_RPC_CTRL_REG] =
+ TS_RPC_CTRL_PACK_IFACE_OPCODE(TS_RPC_MGMT_IFACE_ID,
+ TS_RPC_OP_SERVICE_INFO);
+
+ memcpy(ffa_args + TS_RPC_SERVICE_INFO_UUID0, arg->uuid, UUID_SIZE);
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data);
+ if (rc)
+ return rc;
+
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ if (ffa_args[TS_RPC_SERVICE_INFO_RPC_STATUS] != TS_RPC_OK)
+ return -ENODEV;
+
+ if (ffa_args[TS_RPC_SERVICE_INFO_IFACE] > U8_MAX)
+ return -EINVAL;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ return -ENOMEM;
+
+ sess->iface_id = ffa_args[TS_RPC_SERVICE_INFO_IFACE];
+
+ rc = xa_alloc(&ctxdata->sess_list, &sess_id, sess, xa_limit_32b,
+ GFP_KERNEL);
+ if (rc) {
+ kfree(sess);
+ return rc;
+ }
+
+ arg->session = sess_id;
+ arg->ret = 0;
+
+ return 0;
+}
+
+static int tstee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct ts_context_data *ctxdata = ctx->data;
+ struct ts_session *sess;
+
+ /* Calls xa_lock() internally */
+ sess = xa_erase(&ctxdata->sess_list, session);
+ if (!sess)
+ return -EINVAL;
+
+ kfree(sess);
+
+ return 0;
+}
+
+static int tstee_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct tstee *tstee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = tstee->ffa_dev;
+ struct ts_context_data *ctxdata = ctx->data;
+ struct ffa_send_direct_data ffa_data;
+ struct tee_shm *shm = NULL;
+ struct ts_session *sess;
+ u32 req_len, ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+ int shm_id, rc;
+ u8 iface_id;
+ u64 handle;
+ u16 opcode;
+
+ xa_lock(&ctxdata->sess_list);
+ sess = xa_load(&ctxdata->sess_list, arg->session);
+
+ /*
+ * Do this while holding the lock to make sure that the session wasn't
+ * closed in the meantime.
+ */
+ if (sess)
+ iface_id = sess->iface_id;
+
+ xa_unlock(&ctxdata->sess_list);
+ if (!sess)
+ return -EINVAL;
+
+ opcode = lower_16_bits(arg->func);
+ shm_id = lower_32_bits(param[0].u.value.a);
+ req_len = lower_32_bits(param[0].u.value.b);
+
+ if (shm_id != 0) {
+ shm = tee_shm_get_from_id(ctx, shm_id);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ if (shm->size < req_len) {
+ dev_err(&ffa_dev->dev,
+ "request doesn't fit into shared memory buffer\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ handle = shm->sec_world_id;
+ } else {
+ handle = FFA_INVALID_MEM_HANDLE;
+ }
+
+ ffa_args[TS_RPC_CTRL_REG] = TS_RPC_CTRL_PACK_IFACE_OPCODE(iface_id,
+ opcode);
+ ffa_args[TS_RPC_SERVICE_MEM_HANDLE_LSW] = lower_32_bits(handle);
+ ffa_args[TS_RPC_SERVICE_MEM_HANDLE_MSW] = upper_32_bits(handle);
+ ffa_args[TS_RPC_SERVICE_REQ_LEN] = req_len;
+ ffa_args[TS_RPC_SERVICE_CLIENT_ID] = 0;
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data);
+ if (rc)
+ goto out;
+
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ if (ffa_args[TS_RPC_SERVICE_RPC_STATUS] != TS_RPC_OK) {
+ dev_err(&ffa_dev->dev, "invoke_func rpc status: %d\n",
+ ffa_args[TS_RPC_SERVICE_RPC_STATUS]);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ arg->ret = ffa_args[TS_RPC_SERVICE_STATUS];
+ if (shm && shm->size >= ffa_args[TS_RPC_SERVICE_RESP_LEN])
+ param[0].u.value.a = ffa_args[TS_RPC_SERVICE_RESP_LEN];
+
+out:
+ if (shm)
+ tee_shm_put(shm);
+
+ return rc;
+}
+
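+/*
+ * Registering a buffer is a two-step handshake: the pages are first
+ * shared over FF-A (memory_share()), then a RETRIEVE_MEM management
+ * call asks the secure side to map the handle. On failure the share
+ * is reclaimed so the handle does not leak.
+ */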
+static int tstee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start __always_unused)
+{
+ struct tstee *tstee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = tstee->ffa_dev;
+ struct ffa_mem_region_attributes mem_attr = {
+ .receiver = tstee->ffa_dev->vm_id,
+ .attrs = FFA_MEM_RW,
+ .flag = 0,
+ };
+ struct ffa_mem_ops_args mem_args = {
+ .attrs = &mem_attr,
+ .use_txbuf = true,
+ .nattrs = 1,
+ .flags = 0,
+ };
+ struct ffa_send_direct_data ffa_data;
+ struct sg_table sgt;
+ u32 ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+ int rc;
+
+ rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
+ num_pages * PAGE_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ mem_args.sg = sgt.sgl;
+ rc = ffa_dev->ops->mem_ops->memory_share(&mem_args);
+ sg_free_table(&sgt);
+ if (rc)
+ return rc;
+
+ shm->sec_world_id = mem_args.g_handle;
+
+ ffa_args[TS_RPC_CTRL_REG] =
+ TS_RPC_CTRL_PACK_IFACE_OPCODE(TS_RPC_MGMT_IFACE_ID,
+ TS_RPC_OP_RETRIEVE_MEM);
+ ffa_args[TS_RPC_RETRIEVE_MEM_HANDLE_LSW] =
+ lower_32_bits(shm->sec_world_id);
+ ffa_args[TS_RPC_RETRIEVE_MEM_HANDLE_MSW] =
+ upper_32_bits(shm->sec_world_id);
+ ffa_args[TS_RPC_RETRIEVE_MEM_TAG_LSW] = 0;
+ ffa_args[TS_RPC_RETRIEVE_MEM_TAG_MSW] = 0;
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data);
+ if (rc) {
+ (void)ffa_dev->ops->mem_ops->memory_reclaim(shm->sec_world_id,
+ 0);
+ return rc;
+ }
+
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ if (ffa_args[TS_RPC_RETRIEVE_MEM_RPC_STATUS] != TS_RPC_OK) {
+ dev_err(&ffa_dev->dev, "shm_register rpc status: %d\n",
+ ffa_args[TS_RPC_RETRIEVE_MEM_RPC_STATUS]);
+ ffa_dev->ops->mem_ops->memory_reclaim(shm->sec_world_id, 0);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tstee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct tstee *tstee = tee_get_drvdata(ctx->teedev);
+ struct ffa_device *ffa_dev = tstee->ffa_dev;
+ struct ffa_send_direct_data ffa_data;
+ u32 ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+ int rc;
+
+ ffa_args[TS_RPC_CTRL_REG] =
+ TS_RPC_CTRL_PACK_IFACE_OPCODE(TS_RPC_MGMT_IFACE_ID,
+ TS_RPC_OP_RELINQ_MEM);
+ ffa_args[TS_RPC_RELINQ_MEM_HANDLE_LSW] =
+ lower_32_bits(shm->sec_world_id);
+ ffa_args[TS_RPC_RELINQ_MEM_HANDLE_MSW] =
+ upper_32_bits(shm->sec_world_id);
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data);
+ if (rc)
+ return rc;
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ if (ffa_args[TS_RPC_RELINQ_MEM_RPC_STATUS] != TS_RPC_OK) {
+ dev_err(&ffa_dev->dev, "shm_unregister rpc status: %d\n",
+ ffa_args[TS_RPC_RELINQ_MEM_RPC_STATUS]);
+ return -EINVAL;
+ }
+
+ rc = ffa_dev->ops->mem_ops->memory_reclaim(shm->sec_world_id, 0);
+
+ return rc;
+}
+
+static const struct tee_driver_ops tstee_ops = {
+ .get_version = tstee_get_version,
+ .open = tstee_open,
+ .release = tstee_release,
+ .open_session = tstee_open_session,
+ .close_session = tstee_close_session,
+ .invoke_func = tstee_invoke_func,
+};
+
+static const struct tee_desc tstee_desc = {
+ .name = "tstee-clnt",
+ .ops = &tstee_ops,
+ .owner = THIS_MODULE,
+};
+
+static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+ size_t size, size_t align)
+{
+ return tee_dyn_shm_alloc_helper(shm, size, align, tstee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
+{
+ tee_dyn_shm_free_helper(shm, tstee_shm_unregister);
+}
+
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
+{
+ kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_pool = pool_op_destroy_pool,
+};
+
+static struct tee_shm_pool *tstee_create_shm_pool(void)
+{
+ struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->ops = &pool_ops;
+
+ return pool;
+}
+
+static bool tstee_check_rpc_compatible(struct ffa_device *ffa_dev)
+{
+ struct ffa_send_direct_data ffa_data;
+ u32 ffa_args[FFA_DIRECT_REQ_ARG_NUM] = {};
+
+ ffa_args[TS_RPC_CTRL_REG] =
+ TS_RPC_CTRL_PACK_IFACE_OPCODE(TS_RPC_MGMT_IFACE_ID,
+ TS_RPC_OP_GET_VERSION);
+
+ arg_list_to_ffa_data(ffa_args, &ffa_data);
+ if (ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &ffa_data))
+ return false;
+
+ arg_list_from_ffa_data(&ffa_data, ffa_args);
+
+ return ffa_args[TS_RPC_GET_VERSION_RESP] == TS_RPC_PROTOCOL_VERSION;
+}
+
+static int tstee_probe(struct ffa_device *ffa_dev)
+{
+ struct tstee *tstee;
+ int rc;
+
+ ffa_dev->ops->msg_ops->mode_32bit_set(ffa_dev);
+
+ if (!tstee_check_rpc_compatible(ffa_dev))
+ return -EINVAL;
+
+ tstee = kzalloc(sizeof(*tstee), GFP_KERNEL);
+ if (!tstee)
+ return -ENOMEM;
+
+ tstee->ffa_dev = ffa_dev;
+
+ tstee->pool = tstee_create_shm_pool();
+ if (IS_ERR(tstee->pool)) {
+ rc = PTR_ERR(tstee->pool);
+ tstee->pool = NULL;
+ goto err_free_tstee;
+ }
+
+ tstee->teedev = tee_device_alloc(&tstee_desc, NULL, tstee->pool, tstee);
+ if (IS_ERR(tstee->teedev)) {
+ rc = PTR_ERR(tstee->teedev);
+ tstee->teedev = NULL;
+ goto err_free_pool;
+ }
+
+ rc = tee_device_register(tstee->teedev);
+ if (rc)
+ goto err_unreg_teedev;
+
+ ffa_dev_set_drvdata(ffa_dev, tstee);
+
+ return 0;
+
+err_unreg_teedev:
+ tee_device_unregister(tstee->teedev);
+err_free_pool:
+ tee_shm_pool_free(tstee->pool);
+err_free_tstee:
+ kfree(tstee);
+ return rc;
+}
+
+static void tstee_remove(struct ffa_device *ffa_dev)
+{
+ struct tstee *tstee = ffa_dev->dev.driver_data;
+
+ tee_device_unregister(tstee->teedev);
+ tee_shm_pool_free(tstee->pool);
+ kfree(tstee);
+}
+
+static const struct ffa_device_id tstee_device_ids[] = {
+ /* TS RPC protocol UUID: bdcd76d7-825e-4751-963b-86d4f84943ac */
+ { TS_RPC_UUID },
+ {}
+};
+
+static struct ffa_driver tstee_driver = {
+ .name = "arm_tstee",
+ .probe = tstee_probe,
+ .remove = tstee_remove,
+ .id_table = tstee_device_ids,
+};
+
+module_ffa_driver(tstee_driver);
+
+MODULE_AUTHOR("Balint Dobszay <balint.dobszay@arm.com>");
+MODULE_DESCRIPTION("Arm Trusted Services TEE driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tee/tstee/tstee_private.h b/drivers/tee/tstee/tstee_private.h
new file mode 100644
index 000000000000..8e58725b57eb
--- /dev/null
+++ b/drivers/tee/tstee/tstee_private.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Arm Limited
+ */
+
+#ifndef TSTEE_PRIVATE_H
+#define TSTEE_PRIVATE_H
+
+#include <linux/arm_ffa.h>
+#include <linux/bitops.h>
+#include <linux/tee_core.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
+
+/*
+ * The description of the ABI implemented in this file is available at
+ * https://trusted-services.readthedocs.io/en/v1.0.0/developer/service-access-protocols.html#abi
+ */
+
+/* UUID of this protocol */
+#define TS_RPC_UUID UUID_INIT(0xbdcd76d7, 0x825e, 0x4751, \
+ 0x96, 0x3b, 0x86, 0xd4, 0xf8, 0x49, 0x43, 0xac)
+
+/* Protocol version */
+#define TS_RPC_PROTOCOL_VERSION (1)
+
+/* Status codes */
+#define TS_RPC_OK (0)
+
+/* RPC control register */
+#define TS_RPC_CTRL_REG (0)
+#define OPCODE_MASK GENMASK(15, 0)
+#define IFACE_ID_MASK GENMASK(23, 16)
+#define TS_RPC_CTRL_OPCODE(x) ((u16)(FIELD_GET(OPCODE_MASK, (x))))
+#define TS_RPC_CTRL_IFACE_ID(x) ((u8)(FIELD_GET(IFACE_ID_MASK, (x))))
+#define TS_RPC_CTRL_PACK_IFACE_OPCODE(i, o) \
+ (FIELD_PREP(IFACE_ID_MASK, (i)) | FIELD_PREP(OPCODE_MASK, (o)))
+#define TS_RPC_CTRL_SAP_RC BIT(30)
+#define TS_RPC_CTRL_SAP_ERR BIT(31)
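+
+/*
+ * Worked example: TS_RPC_CTRL_PACK_IFACE_OPCODE(0xff, 0x0003) yields
+ * 0x00ff0003; TS_RPC_CTRL_IFACE_ID() and TS_RPC_CTRL_OPCODE() recover
+ * 0xff and 0x0003 from that word.
+ */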
+
+/* Interface ID for RPC management operations */
+#define TS_RPC_MGMT_IFACE_ID (0xff)
+
+/* Management calls */
+#define TS_RPC_OP_GET_VERSION (0x0000)
+#define TS_RPC_GET_VERSION_RESP (1)
+
+#define TS_RPC_OP_RETRIEVE_MEM (0x0001)
+#define TS_RPC_RETRIEVE_MEM_HANDLE_LSW (1)
+#define TS_RPC_RETRIEVE_MEM_HANDLE_MSW (2)
+#define TS_RPC_RETRIEVE_MEM_TAG_LSW (3)
+#define TS_RPC_RETRIEVE_MEM_TAG_MSW (4)
+#define TS_RPC_RETRIEVE_MEM_RPC_STATUS (1)
+
+#define TS_RPC_OP_RELINQ_MEM (0x0002)
+#define TS_RPC_RELINQ_MEM_HANDLE_LSW (1)
+#define TS_RPC_RELINQ_MEM_HANDLE_MSW (2)
+#define TS_RPC_RELINQ_MEM_RPC_STATUS (1)
+
+#define TS_RPC_OP_SERVICE_INFO (0x0003)
+#define TS_RPC_SERVICE_INFO_UUID0 (1)
+#define TS_RPC_SERVICE_INFO_UUID1 (2)
+#define TS_RPC_SERVICE_INFO_UUID2 (3)
+#define TS_RPC_SERVICE_INFO_UUID3 (4)
+#define TS_RPC_SERVICE_INFO_RPC_STATUS (1)
+#define TS_RPC_SERVICE_INFO_IFACE (2)
+
+/* Service call */
+#define TS_RPC_SERVICE_MEM_HANDLE_LSW (1)
+#define TS_RPC_SERVICE_MEM_HANDLE_MSW (2)
+#define TS_RPC_SERVICE_REQ_LEN (3)
+#define TS_RPC_SERVICE_CLIENT_ID (4)
+#define TS_RPC_SERVICE_RPC_STATUS (1)
+#define TS_RPC_SERVICE_STATUS (2)
+#define TS_RPC_SERVICE_RESP_LEN (3)
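+
+/*
+ * A service request thus places the packed iface/opcode word in reg 0,
+ * the shared memory handle in regs 1-2, the request length in reg 3 and
+ * the client ID in reg 4; the response carries the RPC status, service
+ * status and response length in regs 1-3 (see tstee_invoke_func()).
+ */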
+
+struct tstee {
+ struct ffa_device *ffa_dev;
+ struct tee_device *teedev;
+ struct tee_shm_pool *pool;
+};
+
+struct ts_session {
+ u8 iface_id;
+};
+
+struct ts_context_data {
+ struct xarray sess_list;
+};
+
+#endif /* TSTEE_PRIVATE_H */
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index df7a5ed55385..cd4776aa805e 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -220,6 +220,12 @@ static const struct amlogic_thermal_data amlogic_thermal_g12a_ddr_param = {
.regmap_config = &amlogic_thermal_regmap_config_g12a,
};
+static const struct amlogic_thermal_data amlogic_thermal_a1_cpu_param = {
+ .u_efuse_off = 0x114,
+ .calibration_parameters = &amlogic_thermal_g12a,
+ .regmap_config = &amlogic_thermal_regmap_config_g12a,
+};
+
static const struct of_device_id of_amlogic_thermal_match[] = {
{
.compatible = "amlogic,g12a-ddr-thermal",
@@ -229,6 +235,10 @@ static const struct of_device_id of_amlogic_thermal_match[] = {
.compatible = "amlogic,g12a-cpu-thermal",
.data = &amlogic_thermal_g12a_cpu_param,
},
+ {
+ .compatible = "amlogic,a1-cpu-thermal",
+ .data = &amlogic_thermal_a1_cpu_param,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_amlogic_thermal_match);
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index f783547ef964..fdcb077cfd54 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -763,7 +763,6 @@ static void armada_set_sane_name(struct platform_device *pdev,
struct armada_thermal_priv *priv)
{
const char *name = dev_name(&pdev->dev);
- char *insane_char;
if (strlen(name) > THERMAL_NAME_LENGTH) {
/*
@@ -781,12 +780,8 @@ static void armada_set_sane_name(struct platform_device *pdev,
/* Save the name locally */
strscpy(priv->zone_name, name, THERMAL_NAME_LENGTH);
- /* Then check there are no '-' or hwmon core will complain */
- do {
- insane_char = strpbrk(priv->zone_name, "-");
- if (insane_char)
- *insane_char = '_';
- } while (insane_char);
+ /* Then ensure there are no '-' or hwmon core will complain */
+ strreplace(priv->zone_name, '-', '_');
}
/*
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 9d1b1459700d..280071be30b1 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -477,7 +477,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
- struct cpumask *cpus;
unsigned int frequency;
int ret;
@@ -494,8 +493,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
if (ret >= 0) {
cpufreq_cdev->cpufreq_state = state;
- cpus = cpufreq_cdev->policy->related_cpus;
- arch_update_thermal_pressure(cpus, frequency);
ret = 0;
}
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index c3b2943a2db8..acb52c9ee10f 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -13,60 +13,11 @@
#include "thermal_core.h"
-static int thermal_zone_trip_update(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
-{
- int trip_index = thermal_zone_trip_id(tz, trip);
- struct thermal_instance *instance;
-
- if (!trip->hysteresis)
- dev_info_once(&tz->device,
- "Zero hysteresis value for thermal zone %s\n", tz->type);
-
- dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
- trip_index, trip->temperature, tz->temperature,
- trip->hysteresis);
-
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip)
- continue;
-
- /* in case fan is in initial state, switch the fan off */
- if (instance->target == THERMAL_NO_TARGET)
- instance->target = 0;
-
- /* in case fan is neither on nor off set the fan to active */
- if (instance->target != 0 && instance->target != 1) {
- pr_warn("Thermal instance %s controlled by bang-bang has unexpected state: %ld\n",
- instance->name, instance->target);
- instance->target = 1;
- }
-
- /*
- * enable fan when temperature exceeds trip_temp and disable
- * the fan in case it falls below trip_temp minus hysteresis
- */
- if (instance->target == 0 && tz->temperature >= trip->temperature)
- instance->target = 1;
- else if (instance->target == 1 &&
- tz->temperature < trip->temperature - trip->hysteresis)
- instance->target = 0;
-
- dev_dbg(&instance->cdev->device, "target=%d\n",
- (int)instance->target);
-
- mutex_lock(&instance->cdev->lock);
- instance->cdev->updated = false; /* cdev needs update */
- mutex_unlock(&instance->cdev->lock);
- }
-
- return 0;
-}
-
/**
* bang_bang_control - controls devices associated with the given zone
* @tz: thermal_zone_device
* @trip: the trip point
+ * @crossed_up: whether or not the trip has been crossed on the way up
*
* Regulation Logic: a two point regulation, deliver cooling state depending
* on the previous state shown in this diagram:
@@ -90,26 +41,54 @@ static int thermal_zone_trip_update(struct thermal_zone_device *tz,
* (trip_temp - hyst) so that the fan gets turned off again.
*
*/
-static int bang_bang_control(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+static void bang_bang_control(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ bool crossed_up)
{
struct thermal_instance *instance;
- int ret;
lockdep_assert_held(&tz->lock);
- ret = thermal_zone_trip_update(tz, trip);
- if (ret)
- return ret;
+ dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
+ thermal_zone_trip_id(tz, trip), trip->temperature,
+ tz->temperature, trip->hysteresis);
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip)
+ continue;
+
+ if (instance->target == THERMAL_NO_TARGET)
+ instance->target = 0;
+
+ if (instance->target != 0 && instance->target != 1) {
+ pr_debug("Unexpected state %ld of thermal instance %s in bang-bang\n",
+ instance->target, instance->name);
+
+ instance->target = 1;
+ }
+
+ /*
+ * Enable the fan when the trip is crossed on the way up and
+ * disable it when the trip is crossed on the way down.
+ */
+ if (instance->target == 0 && crossed_up)
+ instance->target = 1;
+ else if (instance->target == 1 && !crossed_up)
+ instance->target = 0;
+
+ dev_dbg(&instance->cdev->device, "target=%ld\n", instance->target);
+
+ mutex_lock(&instance->cdev->lock);
+ instance->cdev->updated = false; /* cdev needs update */
+ mutex_unlock(&instance->cdev->lock);
+ }
list_for_each_entry(instance, &tz->thermal_instances, tz_node)
thermal_cdev_update(instance->cdev);
-
- return 0;
}
static struct thermal_governor thermal_gov_bang_bang = {
.name = "bang_bang",
- .throttle = bang_bang_control,
+ .trip_crossed = bang_bang_control,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_bang_bang);
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index 4da25a0009d7..ce0ea571ed67 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -17,97 +17,111 @@
static int get_trip_level(struct thermal_zone_device *tz)
{
- const struct thermal_trip *trip, *level_trip = NULL;
+ const struct thermal_trip_desc *level_td = NULL;
+ const struct thermal_trip_desc *td;
int trip_level = -1;
- for_each_trip(tz, trip) {
- if (trip->temperature >= tz->temperature)
+ for_each_trip_desc(tz, td) {
+ if (td->threshold > tz->temperature)
continue;
trip_level++;
- if (!level_trip || trip->temperature > level_trip->temperature)
- level_trip = trip;
+ if (!level_td || td->threshold > level_td->threshold)
+ level_td = td;
}
/* Bail out if the temperature is not greater than any trips. */
if (trip_level < 0)
return 0;
- trace_thermal_zone_trip(tz, thermal_zone_trip_id(tz, level_trip),
- level_trip->type);
+ trace_thermal_zone_trip(tz, thermal_zone_trip_id(tz, &level_td->trip),
+ level_td->trip.type);
return trip_level;
}
-static long get_target_state(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev, int percentage, int level)
-{
- return (long)(percentage * level * cdev->max_state) / (100 * tz->num_trips);
-}
-
/**
* fair_share_throttle - throttles devices associated with the given zone
* @tz: thermal_zone_device
* @trip: trip point
+ * @trip_level: number of trips crossed by the zone temperature
*
* Throttling Logic: This uses three parameters to calculate the new
* throttle state of the cooling devices associated with the given zone.
*
* Parameters used for Throttling:
* P1. max_state: Maximum throttle state exposed by the cooling device.
- * P2. percentage[i]/100:
+ * P2. weight[i]/total_weight:
* How 'effective' the 'i'th device is, in cooling the given zone.
- * P3. cur_trip_level/max_no_of_trips:
+ * P3. trip_level/max_no_of_trips:
* This describes the extent to which the devices should be throttled.
* We do not want to throttle too much when we trip a lower temperature,
* whereas the throttling is at full swing if we trip critical levels.
- * (Heavily assumes the trip points are in ascending order)
* new_state of cooling device = P3 * P2 * P1
*/
-static int fair_share_throttle(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+static void fair_share_throttle(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ int trip_level)
{
struct thermal_instance *instance;
int total_weight = 0;
- int total_instance = 0;
- int cur_trip_level = get_trip_level(tz);
-
- lockdep_assert_held(&tz->lock);
+ int nr_instances = 0;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
continue;
total_weight += instance->weight;
- total_instance++;
+ nr_instances++;
}
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- int percentage;
struct thermal_cooling_device *cdev = instance->cdev;
+ u64 dividend;
+ u32 divisor;
if (instance->trip != trip)
continue;
- if (!total_weight)
- percentage = 100 / total_instance;
- else
- percentage = (instance->weight * 100) / total_weight;
-
- instance->target = get_target_state(tz, cdev, percentage,
- cur_trip_level);
+ dividend = trip_level;
+ dividend *= cdev->max_state;
+ divisor = tz->num_trips;
+ if (total_weight) {
+ dividend *= instance->weight;
+ divisor *= total_weight;
+ } else {
+ divisor *= nr_instances;
+ }
+ instance->target = div_u64(dividend, divisor);
mutex_lock(&cdev->lock);
__thermal_cdev_update(cdev);
mutex_unlock(&cdev->lock);
}
+}
- return 0;
+static void fair_share_manage(struct thermal_zone_device *tz)
+{
+ int trip_level = get_trip_level(tz);
+ const struct thermal_trip_desc *td;
+
+ lockdep_assert_held(&tz->lock);
+
+ for_each_trip_desc(tz, td) {
+ const struct thermal_trip *trip = &td->trip;
+
+ if (trip->temperature == THERMAL_TEMP_INVALID ||
+ trip->type == THERMAL_TRIP_CRITICAL ||
+ trip->type == THERMAL_TRIP_HOT)
+ continue;
+
+ fair_share_throttle(tz, trip, trip_level);
+ }
}
static struct thermal_governor thermal_gov_fair_share = {
- .name = "fair_share",
- .throttle = fair_share_throttle,
+ .name = "fair_share",
+ .manage = fair_share_manage,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_fair_share);
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index e25e48d76aa7..45f04a25255a 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -66,6 +66,7 @@ struct power_actor {
* struct power_allocator_params - parameters for the power allocator governor
* @allocated_tzp: whether we have allocated tzp for this thermal zone and
* it needs to be freed on unbind
+ * @update_cdevs: whether or not to update cdevs on the next run
* @err_integral: accumulated error in the PID controller.
* @prev_err: error in the previous iteration of the PID controller.
* Used to calculate the derivative term.
@@ -84,6 +85,7 @@ struct power_actor {
*/
struct power_allocator_params {
bool allocated_tzp;
+ bool update_cdevs;
s64 err_integral;
s32 prev_err;
u32 sustainable_power;
@@ -395,7 +397,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
}
}
-static int allocate_power(struct thermal_zone_device *tz, int control_temp)
+static void allocate_power(struct thermal_zone_device *tz, int control_temp)
{
struct power_allocator_params *params = tz->governor_data;
unsigned int num_actors = params->num_actors;
@@ -410,7 +412,7 @@ static int allocate_power(struct thermal_zone_device *tz, int control_temp)
int i = 0, ret;
if (!num_actors)
- return -ENODEV;
+ return;
/* Clean all buffers for new power estimations */
memset(power, 0, params->buffer_size);
@@ -471,8 +473,6 @@ static int allocate_power(struct thermal_zone_device *tz, int control_temp)
num_actors, power_range,
max_allocatable_power, tz->temperature,
control_temp - tz->temperature);
-
- return 0;
}
/**
@@ -496,9 +496,11 @@ static void get_governor_trips(struct thermal_zone_device *tz,
const struct thermal_trip *first_passive = NULL;
const struct thermal_trip *last_passive = NULL;
const struct thermal_trip *last_active = NULL;
- const struct thermal_trip *trip;
+ const struct thermal_trip_desc *td;
+
+ for_each_trip_desc(tz, td) {
+ const struct thermal_trip *trip = &td->trip;
- for_each_trip(tz, trip) {
switch (trip->type) {
case THERMAL_TRIP_PASSIVE:
if (!first_passive) {
@@ -533,7 +535,7 @@ static void reset_pid_controller(struct power_allocator_params *params)
params->prev_err = 0;
}
-static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
+static void allow_maximum_power(struct thermal_zone_device *tz)
{
struct power_allocator_params *params = tz->governor_data;
struct thermal_cooling_device *cdev;
@@ -555,7 +557,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
*/
cdev->ops->get_requested_power(cdev, &req_power);
- if (update)
+ if (params->update_cdevs)
__thermal_cdev_update(cdev);
mutex_unlock(&cdev->lock);
@@ -743,40 +745,29 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
tz->governor_data = NULL;
}
-static int power_allocator_throttle(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+static void power_allocator_manage(struct thermal_zone_device *tz)
{
struct power_allocator_params *params = tz->governor_data;
- bool update;
+ const struct thermal_trip *trip = params->trip_switch_on;
lockdep_assert_held(&tz->lock);
- /*
- * We get called for every trip point but we only need to do
- * our calculations once
- */
- if (trip != params->trip_max)
- return 0;
-
- trip = params->trip_switch_on;
if (trip && tz->temperature < trip->temperature) {
- update = tz->passive;
- tz->passive = 0;
reset_pid_controller(params);
- allow_maximum_power(tz, update);
- return 0;
+ allow_maximum_power(tz);
+ params->update_cdevs = false;
+ return;
}
- tz->passive = 1;
-
- return allocate_power(tz, params->trip_max->temperature);
+ allocate_power(tz, params->trip_max->temperature);
+ params->update_cdevs = true;
}
static struct thermal_governor thermal_gov_power_allocator = {
.name = "power_allocator",
.bind_to_tz = power_allocator_bind,
.unbind_from_tz = power_allocator_unbind,
- .throttle = power_allocator_throttle,
+ .manage = power_allocator_manage,
.update_tz = power_allocator_update_tz,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_power_allocator);
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index 5436aa58d41e..e0fdc497bfcc 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -32,7 +32,6 @@ static unsigned long get_target_state(struct thermal_instance *instance,
{
struct thermal_cooling_device *cdev = instance->cdev;
unsigned long cur_state;
- unsigned long next_target;
/*
* We keep this instance the way it is by default.
@@ -40,112 +39,99 @@ static unsigned long get_target_state(struct thermal_instance *instance,
* cdev in use to determine the next_target.
*/
cdev->ops->get_cur_state(cdev, &cur_state);
- next_target = instance->target;
dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
if (!instance->initialized) {
- if (throttle) {
- next_target = clamp((cur_state + 1), instance->lower, instance->upper);
- } else {
- next_target = THERMAL_NO_TARGET;
- }
+ if (throttle)
+ return clamp(cur_state + 1, instance->lower, instance->upper);
- return next_target;
+ return THERMAL_NO_TARGET;
}
if (throttle) {
if (trend == THERMAL_TREND_RAISING)
- next_target = clamp((cur_state + 1), instance->lower, instance->upper);
- } else {
- if (trend == THERMAL_TREND_DROPPING) {
- if (cur_state <= instance->lower)
- next_target = THERMAL_NO_TARGET;
- else
- next_target = clamp((cur_state - 1), instance->lower, instance->upper);
- }
+ return clamp(cur_state + 1, instance->lower, instance->upper);
+ } else if (trend == THERMAL_TREND_DROPPING) {
+ if (cur_state <= instance->lower)
+ return THERMAL_NO_TARGET;
+
+ return clamp(cur_state - 1, instance->lower, instance->upper);
}
- return next_target;
+ return instance->target;
}
static void thermal_zone_trip_update(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+ const struct thermal_trip *trip,
+ int trip_threshold)
{
+ enum thermal_trend trend = get_tz_trend(tz, trip);
int trip_id = thermal_zone_trip_id(tz, trip);
- enum thermal_trend trend;
struct thermal_instance *instance;
bool throttle = false;
- int old_target;
-
- trend = get_tz_trend(tz, trip);
- if (tz->temperature >= trip->temperature) {
+ if (tz->temperature >= trip_threshold) {
throttle = true;
trace_thermal_zone_trip(tz, trip_id, trip->type);
}
dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
- trip_id, trip->type, trip->temperature, trend, throttle);
+ trip_id, trip->type, trip_threshold, trend, throttle);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ int old_target;
+
if (instance->trip != trip)
continue;
old_target = instance->target;
instance->target = get_target_state(instance, trend, throttle);
- dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
- old_target, (int)instance->target);
+
+ dev_dbg(&instance->cdev->device, "old_target=%d, target=%ld\n",
+ old_target, instance->target);
if (instance->initialized && old_target == instance->target)
continue;
- if (old_target == THERMAL_NO_TARGET &&
- instance->target != THERMAL_NO_TARGET) {
- /* Activate a passive thermal instance */
- if (trip->type == THERMAL_TRIP_PASSIVE)
- tz->passive++;
- } else if (old_target != THERMAL_NO_TARGET &&
- instance->target == THERMAL_NO_TARGET) {
- /* Deactivate a passive thermal instance */
- if (trip->type == THERMAL_TRIP_PASSIVE)
- tz->passive--;
- }
-
instance->initialized = true;
+
mutex_lock(&instance->cdev->lock);
instance->cdev->updated = false; /* cdev needs update */
mutex_unlock(&instance->cdev->lock);
}
}
-/**
- * step_wise_throttle - throttles devices associated with the given zone
- * @tz: thermal_zone_device
- * @trip: trip point
- *
- * Throttling Logic: This uses the trend of the thermal zone to throttle.
- * If the thermal zone is 'heating up' this throttles all the cooling
- * devices associated with the zone and its particular trip point, by one
- * step. If the zone is 'cooling down' it brings back the performance of
- * the devices by one step.
- */
-static int step_wise_throttle(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+static void step_wise_manage(struct thermal_zone_device *tz)
{
+ const struct thermal_trip_desc *td;
struct thermal_instance *instance;
lockdep_assert_held(&tz->lock);
- thermal_zone_trip_update(tz, trip);
+ /*
+ * Throttling Logic: Use the trend of the thermal zone to throttle.
+ * If the thermal zone is 'heating up', throttle all of the cooling
+ * devices associated with each trip point by one step. If the zone
+ * is 'cooling down', bring back the performance of the devices
+ * by one step.
+ */
+ for_each_trip_desc(tz, td) {
+ const struct thermal_trip *trip = &td->trip;
+
+ if (trip->temperature == THERMAL_TEMP_INVALID ||
+ trip->type == THERMAL_TRIP_CRITICAL ||
+ trip->type == THERMAL_TRIP_HOT)
+ continue;
+
+ thermal_zone_trip_update(tz, trip, td->threshold);
+ }
list_for_each_entry(instance, &tz->thermal_instances, tz_node)
thermal_cdev_update(instance->cdev);
-
- return 0;
}
static struct thermal_governor thermal_gov_step_wise = {
- .name = "step_wise",
- .throttle = step_wise_throttle,
+ .name = "step_wise",
+ .manage = step_wise_manage,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_step_wise);
diff --git a/drivers/thermal/gov_user_space.c b/drivers/thermal/gov_user_space.c
index 7a1790b7e8f5..75137b419eb2 100644
--- a/drivers/thermal/gov_user_space.c
+++ b/drivers/thermal/gov_user_space.c
@@ -26,11 +26,13 @@ static int user_space_bind(struct thermal_zone_device *tz)
* notify_user_space - Notifies user space about thermal events
* @tz: thermal_zone_device
* @trip: trip point
+ * @crossed_up: whether or not the trip has been crossed on the way up
*
* This function notifies the user space through UEvents.
*/
-static int notify_user_space(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+static void notify_user_space(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ bool crossed_up)
{
char *thermal_prop[5];
int i;
@@ -46,13 +48,11 @@ static int notify_user_space(struct thermal_zone_device *tz,
kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, thermal_prop);
for (i = 0; i < 4; ++i)
kfree(thermal_prop[i]);
-
- return 0;
}
static struct thermal_governor thermal_gov_user_space = {
.name = "user_space",
- .throttle = notify_user_space,
+ .trip_crossed = notify_user_space,
.bind_to_tz = user_space_bind,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_user_space);
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index dc519a665c18..4b4a4d63e61f 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -309,7 +309,7 @@ static int acpi_parse_psvt(acpi_handle handle, int *psvt_count, struct psvt **ps
if (knob->type == ACPI_TYPE_STRING) {
memset(&psvt->limit, 0, sizeof(u64));
- strncpy(psvt->limit.string, psvt_ptr->limit.str_ptr, knob->string.length);
+ strscpy(psvt->limit.string, psvt_ptr->limit.str_ptr, ACPI_LIMIT_STR_MAX_LEN);
} else {
psvt->limit.integer = psvt_ptr->limit.integer;
}
@@ -468,7 +468,7 @@ static int fill_psvt(char __user *ubuf)
psvt_user[i].unlimit_coeff = psvts[i].unlimit_coeff;
psvt_user[i].control_knob_type = psvts[i].control_knob_type;
if (psvt_user[i].control_knob_type == ACPI_TYPE_STRING)
- strncpy(psvt_user[i].limit.string, psvts[i].limit.string,
+ strscpy(psvt_user[i].limit.string, psvts[i].limit.string,
ACPI_LIMIT_STR_MAX_LEN);
else
psvt_user[i].limit.integer = psvts[i].limit.integer;
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 427d370648d5..f8ebdd19d340 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -705,6 +705,7 @@ static const struct acpi_device_id int3400_thermal_match[] = {
{"INTC1040", 0},
{"INTC1041", 0},
{"INTC1042", 0},
+ {"INTC1068", 0},
{"INTC10A0", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 9b33fd3a66da..86901f9f54d8 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -284,6 +284,7 @@ static const struct acpi_device_id int3403_device_ids[] = {
{"INTC1043", 0},
{"INTC1046", 0},
{"INTC1062", 0},
+ {"INTC1069", 0},
{"INTC10A1", 0},
{"", 0},
};
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
index 4d3bd32ff9ea..af2ec0beb7a1 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_mbox.c
@@ -156,3 +156,4 @@ unlock:
EXPORT_SYMBOL_NS_GPL(processor_thermal_mbox_interrupt_config, INT340X_THERMAL);
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Processor Thermal Mail Box Interface");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_power_floor.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_power_floor.c
index a1a108407f0f..d6b787ca2741 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_power_floor.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_power_floor.c
@@ -124,3 +124,4 @@ EXPORT_SYMBOL_NS_GPL(proc_thermal_power_floor_intr_callback, INT340X_THERMAL);
MODULE_IMPORT_NS(INT340X_THERMAL);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Processor Thermal power floor notification Interface");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
index e964a9375722..e9aa9e23aab9 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
@@ -133,3 +133,4 @@ void proc_thermal_rapl_remove(void)
EXPORT_SYMBOL_GPL(proc_thermal_rapl_remove);
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RAPL interface using MMIO");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index 546b70434004..e56db75a94fb 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -384,3 +384,4 @@ void proc_thermal_rfim_remove(struct pci_dev *pdev)
EXPORT_SYMBOL_GPL(proc_thermal_rfim_remove);
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Processor Thermal RFIM Interface");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_wt_hint.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_wt_hint.c
index 9d5e4c169d1b..e947d84f4977 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_wt_hint.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_wt_hint.c
@@ -253,3 +253,4 @@ EXPORT_SYMBOL_NS_GPL(proc_thermal_wt_hint_remove, INT340X_THERMAL);
MODULE_IMPORT_NS(INT340X_THERMAL);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Processor Thermal Work Load type hint Interface");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_wt_req.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_wt_req.c
index 711c4f761c9a..f298e7442662 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_wt_req.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_wt_req.c
@@ -134,3 +134,4 @@ EXPORT_SYMBOL_GPL(proc_thermal_wt_req_remove);
MODULE_IMPORT_NS(INT340X_THERMAL);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Processor Thermal Work Load type request Interface");
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index 40d664a66cdc..a180a98bb9f1 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -159,14 +159,15 @@ struct hfi_cpu_info {
static DEFINE_PER_CPU(struct hfi_cpu_info, hfi_cpu_info) = { .index = -1 };
static int max_hfi_instances;
+static int hfi_clients_nr;
static struct hfi_instance *hfi_instances;
static struct hfi_features hfi_features;
static DEFINE_MUTEX(hfi_instance_lock);
static struct workqueue_struct *hfi_updates_wq;
-#define HFI_UPDATE_INTERVAL HZ
-#define HFI_MAX_THERM_NOTIFY_COUNT 16
+#define HFI_UPDATE_DELAY_MS 100
+#define HFI_THERMNL_CAPS_PER_EVENT 64
static void get_hfi_caps(struct hfi_instance *hfi_instance,
struct thermal_genl_cpu_caps *cpu_caps)
@@ -217,14 +218,14 @@ static void update_capabilities(struct hfi_instance *hfi_instance)
get_hfi_caps(hfi_instance, cpu_caps);
- if (cpu_count < HFI_MAX_THERM_NOTIFY_COUNT)
+ if (cpu_count < HFI_THERMNL_CAPS_PER_EVENT)
goto last_cmd;
- /* Process complete chunks of HFI_MAX_THERM_NOTIFY_COUNT capabilities. */
+ /* Process complete chunks of HFI_THERMNL_CAPS_PER_EVENT capabilities. */
for (i = 0;
- (i + HFI_MAX_THERM_NOTIFY_COUNT) <= cpu_count;
- i += HFI_MAX_THERM_NOTIFY_COUNT)
- thermal_genl_cpu_capability_event(HFI_MAX_THERM_NOTIFY_COUNT,
+ (i + HFI_THERMNL_CAPS_PER_EVENT) <= cpu_count;
+ i += HFI_THERMNL_CAPS_PER_EVENT)
+ thermal_genl_cpu_capability_event(HFI_THERMNL_CAPS_PER_EVENT,
&cpu_caps[i]);
cpu_count = cpu_count - i;
@@ -321,7 +322,7 @@ void intel_hfi_process_event(__u64 pkg_therm_status_msr_val)
raw_spin_unlock(&hfi_instance->event_lock);
queue_delayed_work(hfi_updates_wq, &hfi_instance->update_work,
- HFI_UPDATE_INTERVAL);
+ msecs_to_jiffies(HFI_UPDATE_DELAY_MS));
}
static void init_hfi_cpu_index(struct hfi_cpu_info *info)
@@ -477,8 +478,11 @@ void intel_hfi_online(unsigned int cpu)
enable:
cpumask_set_cpu(cpu, hfi_instance->cpus);
- /* Enable this HFI instance if this is its first online CPU. */
- if (cpumask_weight(hfi_instance->cpus) == 1) {
+ /*
+ * Enable this HFI instance if this is its first online CPU and
+ * there are user-space clients of thermal events.
+ */
+ if (cpumask_weight(hfi_instance->cpus) == 1 && hfi_clients_nr > 0) {
hfi_set_hw_table(hfi_instance);
hfi_enable();
}
@@ -573,18 +577,33 @@ static __init int hfi_parse_features(void)
return 0;
}
-static void hfi_do_enable(void)
+/*
+ * If concurrency is not prevented by other means, the HFI enable/disable
+ * routines must be called under hfi_instance_lock.
+ */
+static void hfi_enable_instance(void *ptr)
+{
+ hfi_set_hw_table(ptr);
+ hfi_enable();
+}
+
+static void hfi_disable_instance(void *ptr)
+{
+ hfi_disable();
+}
+
+static void hfi_syscore_resume(void)
{
/* This code runs only on the boot CPU. */
struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
struct hfi_instance *hfi_instance = info->hfi_instance;
/* No locking needed. There is no concurrency with CPU online. */
- hfi_set_hw_table(hfi_instance);
- hfi_enable();
+ if (hfi_clients_nr > 0)
+ hfi_enable_instance(hfi_instance);
}
-static int hfi_do_disable(void)
+static int hfi_syscore_suspend(void)
{
/* No locking needed. There is no concurrency with CPU offline. */
hfi_disable();
@@ -593,8 +612,58 @@ static int hfi_do_disable(void)
}
static struct syscore_ops hfi_pm_ops = {
- .resume = hfi_do_enable,
- .suspend = hfi_do_disable,
+ .resume = hfi_syscore_resume,
+ .suspend = hfi_syscore_suspend,
+};
+
+static int hfi_thermal_notify(struct notifier_block *nb, unsigned long state,
+ void *_notify)
+{
+ struct thermal_genl_notify *notify = _notify;
+ struct hfi_instance *hfi_instance;
+ smp_call_func_t func = NULL;
+ unsigned int cpu;
+ int i;
+
+ if (notify->mcgrp != THERMAL_GENL_EVENT_GROUP)
+ return NOTIFY_DONE;
+
+ if (state != THERMAL_NOTIFY_BIND && state != THERMAL_NOTIFY_UNBIND)
+ return NOTIFY_DONE;
+
+ mutex_lock(&hfi_instance_lock);
+
+ switch (state) {
+ case THERMAL_NOTIFY_BIND:
+ if (++hfi_clients_nr == 1)
+ func = hfi_enable_instance;
+ break;
+ case THERMAL_NOTIFY_UNBIND:
+ if (--hfi_clients_nr == 0)
+ func = hfi_disable_instance;
+ break;
+ }
+
+ if (!func)
+ goto out;
+
+ for (i = 0; i < max_hfi_instances; i++) {
+ hfi_instance = &hfi_instances[i];
+ if (cpumask_empty(hfi_instance->cpus))
+ continue;
+
+ cpu = cpumask_any(hfi_instance->cpus);
+ smp_call_function_single(cpu, func, hfi_instance, true);
+ }
+
+out:
+ mutex_unlock(&hfi_instance_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block hfi_thermal_nb = {
+ .notifier_call = hfi_thermal_notify,
};
void __init intel_hfi_init(void)
@@ -628,10 +697,22 @@ void __init intel_hfi_init(void)
if (!hfi_updates_wq)
goto err_nomem;
+ /*
+ * Neither the thermal core nor Intel HFI can be built as a module.
+ * As kernel built-in drivers they are initialized before user space
+ * starts, hence we cannot miss BIND/UNBIND events when applications
+ * add/remove the thermal multicast group to/from a netlink socket.
+ */
+ if (thermal_genl_register_notifier(&hfi_thermal_nb))
+ goto err_nl_notif;
+
register_syscore_ops(&hfi_pm_ops);
return;
+err_nl_notif:
+ destroy_workqueue(hfi_updates_wq);
+
err_nomem:
for (j = 0; j < i; ++j) {
hfi_instance = &hfi_instances[j];
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
index 2ab943b66f7a..7adf942665d4 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -387,3 +387,4 @@ EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_exit);
MODULE_IMPORT_NS(INTEL_TCC);
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SoC DTS driver using side band interface");
diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c
index e88192d2afea..2a703770fc91 100644
--- a/drivers/thermal/k3_bandgap.c
+++ b/drivers/thermal/k3_bandgap.c
@@ -78,7 +78,6 @@ static const int k3_adc_to_temp[] = {
struct k3_bandgap {
void __iomem *base;
- const struct k3_bandgap_data *conf;
};
/* common data structures */
diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c
index 0f475fe46bc9..2d6b75b0539f 100644
--- a/drivers/thermal/loongson2_thermal.c
+++ b/drivers/thermal/loongson2_thermal.c
@@ -14,58 +14,81 @@
#include <linux/property.h>
#include <linux/thermal.h>
#include <linux/units.h>
+
#include "thermal_hwmon.h"
-#define LOONGSON2_MAX_SENSOR_SEL_NUM 3
+#define LOONGSON2_MAX_SENSOR_SEL_NUM 3
+
+#define LOONGSON2_THSENS_CTRL_HI_REG 0x0
+#define LOONGSON2_THSENS_CTRL_LOW_REG 0x8
+#define LOONGSON2_THSENS_STATUS_REG 0x10
+#define LOONGSON2_THSENS_OUT_REG 0x14
-#define LOONGSON2_THSENS_CTRL_HI_REG 0x0
-#define LOONGSON2_THSENS_CTRL_LOW_REG 0x8
-#define LOONGSON2_THSENS_STATUS_REG 0x10
-#define LOONGSON2_THSENS_OUT_REG 0x14
+#define LOONGSON2_THSENS_INT_LO BIT(0)
+#define LOONGSON2_THSENS_INT_HIGH BIT(1)
+#define LOONGSON2_THSENS_INT_EN (LOONGSON2_THSENS_INT_LO | \
+ LOONGSON2_THSENS_INT_HIGH)
+#define LOONGSON2_THSENS_OUT_MASK 0xFF
-#define LOONGSON2_THSENS_INT_LO BIT(0)
-#define LOONGSON2_THSENS_INT_HIGH BIT(1)
-#define LOONGSON2_THSENS_OUT_MASK 0xFF
+/*
+ * This flag indicates the temperature reading
+ * method of the Loongson-2K2000.
+ */
+#define LS2K2000_THSENS_OUT_FLAG BIT(0)
struct loongson2_thermal_chip_data {
- unsigned int thermal_sensor_sel;
+ unsigned int thermal_sensor_sel;
+ unsigned int flags;
};
struct loongson2_thermal_data {
- void __iomem *regs;
+ void __iomem *ctrl_reg;
+ void __iomem *temp_reg;
const struct loongson2_thermal_chip_data *chip_data;
};
+static void loongson2_set_ctrl_regs(struct loongson2_thermal_data *data,
+ int ctrl_data, bool low, bool enable)
+{
+ int reg_ctrl = 0;
+ int reg_off = data->chip_data->thermal_sensor_sel * 2;
+ int ctrl_reg = low ? LOONGSON2_THSENS_CTRL_LOW_REG : LOONGSON2_THSENS_CTRL_HI_REG;
+
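+ /* Thresholds are encoded with a +100 degrees C bias; bit 8 is the enable bit */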
+ reg_ctrl = ctrl_data + HECTO;
+ reg_ctrl |= enable ? 0x100 : 0;
+ writew(reg_ctrl, data->ctrl_reg + ctrl_reg + reg_off);
+}
+
static int loongson2_thermal_set(struct loongson2_thermal_data *data,
- int low, int high, bool enable)
+ int low, int high, bool enable)
{
- u64 reg_ctrl = 0;
- int reg_off = data->chip_data->thermal_sensor_sel * 2;
+ /* Set low temperature threshold */
+ loongson2_set_ctrl_regs(data, clamp(-40, low, high), true, enable);
- low = clamp(-40, low, high);
- high = clamp(125, low, high);
+ /* Set high temperature threshold */
+ loongson2_set_ctrl_regs(data, clamp(125, low, high), false, enable);
- low += HECTO;
- high += HECTO;
+ return 0;
+}
- reg_ctrl = low;
- reg_ctrl |= enable ? 0x100 : 0;
- writew(reg_ctrl, data->regs + LOONGSON2_THSENS_CTRL_LOW_REG + reg_off);
+static int loongson2_2k1000_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ int val;
+ struct loongson2_thermal_data *data = thermal_zone_device_priv(tz);
- reg_ctrl = high;
- reg_ctrl |= enable ? 0x100 : 0;
- writew(reg_ctrl, data->regs + LOONGSON2_THSENS_CTRL_HI_REG + reg_off);
+ val = readl(data->ctrl_reg + LOONGSON2_THSENS_OUT_REG);
+ *temp = ((val & LOONGSON2_THSENS_OUT_MASK) - HECTO) * KILO;
return 0;
}
-static int loongson2_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
+static int loongson2_2k2000_get_temp(struct thermal_zone_device *tz, int *temp)
{
- u32 reg_val;
+ int val;
struct loongson2_thermal_data *data = thermal_zone_device_priv(tz);
- reg_val = readl(data->regs + LOONGSON2_THSENS_OUT_REG);
- *temp = ((reg_val & LOONGSON2_THSENS_OUT_MASK) - HECTO) * KILO;
+ val = readl(data->temp_reg);
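+ /*
+ * The conversion below maps the raw reading as 820/16384 degrees C
+ * per LSB with a -311 degrees C offset, reported in millidegrees.
+ */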
+ *temp = ((val & 0xffff) * 820 / 0x4000 - 311) * KILO;
return 0;
}
@@ -75,8 +98,7 @@ static irqreturn_t loongson2_thermal_irq_thread(int irq, void *dev)
struct thermal_zone_device *tzd = dev;
struct loongson2_thermal_data *data = thermal_zone_device_priv(tzd);
- writeb(LOONGSON2_THSENS_INT_LO | LOONGSON2_THSENS_INT_HIGH, data->regs +
- LOONGSON2_THSENS_STATUS_REG);
+ writeb(LOONGSON2_THSENS_INT_EN, data->ctrl_reg + LOONGSON2_THSENS_STATUS_REG);
thermal_zone_device_update(tzd, THERMAL_EVENT_UNSPECIFIED);
@@ -90,8 +112,8 @@ static int loongson2_thermal_set_trips(struct thermal_zone_device *tz, int low,
return loongson2_thermal_set(data, low/MILLI, high/MILLI, true);
}
-static const struct thermal_zone_device_ops loongson2_of_thermal_ops = {
- .get_temp = loongson2_thermal_get_temp,
+static struct thermal_zone_device_ops loongson2_of_thermal_ops = {
+ .get_temp = loongson2_2k1000_get_temp,
.set_trips = loongson2_thermal_set_trips,
};
@@ -108,22 +130,30 @@ static int loongson2_thermal_probe(struct platform_device *pdev)
data->chip_data = device_get_match_data(dev);
- data->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(data->regs))
- return PTR_ERR(data->regs);
+ data->ctrl_reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->ctrl_reg))
+ return PTR_ERR(data->ctrl_reg);
+
+ /* The temperature output register is separate for Loongson-2K2000 */
+ if (data->chip_data->flags & LS2K2000_THSENS_OUT_FLAG) {
+ data->temp_reg = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(data->temp_reg))
+ return PTR_ERR(data->temp_reg);
+
+ loongson2_of_thermal_ops.get_temp = loongson2_2k2000_get_temp;
+ }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- writeb(LOONGSON2_THSENS_INT_LO | LOONGSON2_THSENS_INT_HIGH, data->regs +
- LOONGSON2_THSENS_STATUS_REG);
+ writeb(LOONGSON2_THSENS_INT_EN, data->ctrl_reg + LOONGSON2_THSENS_STATUS_REG);
loongson2_thermal_set(data, 0, 0, false);
for (i = 0; i <= LOONGSON2_MAX_SENSOR_SEL_NUM; i++) {
tzd = devm_thermal_of_zone_register(dev, i, data,
- &loongson2_of_thermal_ops);
+ &loongson2_of_thermal_ops);
if (!IS_ERR(tzd))
break;
@@ -135,7 +165,7 @@ static int loongson2_thermal_probe(struct platform_device *pdev)
}
ret = devm_request_threaded_irq(dev, irq, NULL, loongson2_thermal_irq_thread,
- IRQF_ONESHOT, "loongson2_thermal", tzd);
+ IRQF_ONESHOT, "loongson2_thermal", tzd);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to request alarm irq\n");
@@ -146,6 +176,12 @@ static int loongson2_thermal_probe(struct platform_device *pdev)
static const struct loongson2_thermal_chip_data loongson2_thermal_ls2k1000_data = {
.thermal_sensor_sel = 0,
+ .flags = 0,
+};
+
+static const struct loongson2_thermal_chip_data loongson2_thermal_ls2k2000_data = {
+ .thermal_sensor_sel = 0,
+ .flags = LS2K2000_THSENS_OUT_FLAG,
};
static const struct of_device_id of_loongson2_thermal_match[] = {
@@ -153,6 +189,10 @@ static const struct of_device_id of_loongson2_thermal_match[] = {
.compatible = "loongson,ls2k1000-thermal",
.data = &loongson2_thermal_ls2k1000_data,
},
+ {
+ .compatible = "loongson,ls2k2000-thermal",
+ .data = &loongson2_thermal_ls2k2000_data,
+ },
{ /* end */ }
};
MODULE_DEVICE_TABLE(of, of_loongson2_thermal_match);
@@ -167,4 +207,5 @@ static struct platform_driver loongson2_thermal_driver = {
module_platform_driver(loongson2_thermal_driver);
MODULE_DESCRIPTION("Loongson2 thermal driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index fd4bd650c77a..86b2f44355ac 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -91,9 +91,7 @@
#define LVTS_MSR_READ_TIMEOUT_US 400
#define LVTS_MSR_READ_WAIT_US (LVTS_MSR_READ_TIMEOUT_US / 2)
-#define LVTS_HW_SHUTDOWN_MT7988 105000
-#define LVTS_HW_SHUTDOWN_MT8192 105000
-#define LVTS_HW_SHUTDOWN_MT8195 105000
+#define LVTS_HW_TSHUT_TEMP 105000
#define LVTS_MINIMUM_THRESHOLD 20000
@@ -102,22 +100,36 @@ static int golden_temp_offset;
struct lvts_sensor_data {
int dt_id;
+ u8 cal_offsets[3];
};
struct lvts_ctrl_data {
struct lvts_sensor_data lvts_sensor[LVTS_SENSOR_MAX];
int cal_offset[LVTS_SENSOR_MAX];
- int hw_tshut_temp;
int num_lvts_sensor;
+ u8 valid_sensor_mask;
int offset;
int mode;
};
+#define VALID_SENSOR_MAP(s0, s1, s2, s3) \
+ .valid_sensor_mask = (((s0) ? BIT(0) : 0) | \
+ ((s1) ? BIT(1) : 0) | \
+ ((s2) ? BIT(2) : 0) | \
+ ((s3) ? BIT(3) : 0))
+
+#define lvts_for_each_valid_sensor(i, lvts_ctrl_data) \
+ for ((i) = 0; (i) < LVTS_SENSOR_MAX; (i)++) \
+ if (!((lvts_ctrl_data)->valid_sensor_mask & BIT(i))) \
+ continue; \
+ else
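+
+/*
+ * The trailing "else" makes the caller's loop body the else branch, so
+ * sensors absent from valid_sensor_mask are skipped via "continue".
+ */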
+
struct lvts_data {
const struct lvts_ctrl_data *lvts_ctrl;
int num_lvts_ctrl;
int temp_factor;
int temp_offset;
+ int gt_calib_bit_offset;
};
struct lvts_sensor {
@@ -135,7 +147,6 @@ struct lvts_ctrl {
const struct lvts_data *lvts_data;
u32 calibration[LVTS_SENSOR_MAX];
u32 hw_tshut_raw_temp;
- int num_lvts_sensor;
int mode;
void __iomem *base;
int low_thresh;
@@ -347,7 +358,7 @@ static bool lvts_should_update_thresh(struct lvts_ctrl *lvts_ctrl, int high)
if (high > lvts_ctrl->high_thresh)
return true;
- for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++)
+ lvts_for_each_valid_sensor(i, lvts_ctrl->lvts_data->lvts_ctrl)
if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
&& lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
return false;
@@ -551,6 +562,7 @@ static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
const struct lvts_ctrl_data *lvts_ctrl_data)
{
struct lvts_sensor *lvts_sensor = lvts_ctrl->sensors;
+
void __iomem *msr_regs[] = {
LVTS_MSR0(lvts_ctrl->base),
LVTS_MSR1(lvts_ctrl->base),
@@ -567,7 +579,7 @@ static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
int i;
- for (i = 0; i < lvts_ctrl_data->num_lvts_sensor; i++) {
+ lvts_for_each_valid_sensor(i, lvts_ctrl_data) {
int dt_id = lvts_ctrl_data->lvts_sensor[i].dt_id;
@@ -607,8 +619,6 @@ static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
lvts_sensor[i].high_thresh = INT_MIN;
};
- lvts_ctrl->num_lvts_sensor = lvts_ctrl_data->num_lvts_sensor;
-
return 0;
}
@@ -668,18 +678,31 @@ static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
* <-----ap--tc#3-----> <-----sensor#7-----> <-----sensor#8----->
* 0x40 | 0x41 | 0x42 | 0x43 | 0x44 | 0x45 | 0x46 | 0x47 | 0x48
*
- * The data description gives the offset of the calibration data in
- * this bytes stream for each sensor.
+ * Note: In some cases, values don't strictly follow a little-endian ordering.
+ * The data description gives byte offsets constituting each calibration value
+ * for each sensor.
*/
static int lvts_calibration_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
const struct lvts_ctrl_data *lvts_ctrl_data,
- u8 *efuse_calibration)
+ u8 *efuse_calibration,
+ size_t calib_len)
{
int i;
- for (i = 0; i < lvts_ctrl_data->num_lvts_sensor; i++)
- memcpy(&lvts_ctrl->calibration[i],
- efuse_calibration + lvts_ctrl_data->cal_offset[i], 2);
+ lvts_for_each_valid_sensor(i, lvts_ctrl_data) {
+ const struct lvts_sensor_data *sensor =
+ &lvts_ctrl_data->lvts_sensor[i];
+
+ if (sensor->cal_offsets[0] >= calib_len ||
+ sensor->cal_offsets[1] >= calib_len ||
+ sensor->cal_offsets[2] >= calib_len)
+ return -EINVAL;
+
+ lvts_ctrl->calibration[i] =
+ (efuse_calibration[sensor->cal_offsets[0]] << 0) +
+ (efuse_calibration[sensor->cal_offsets[1]] << 8) +
+ (efuse_calibration[sensor->cal_offsets[2]] << 16);
+ }
return 0;
}
@@ -734,16 +757,21 @@ static int lvts_calibration_read(struct device *dev, struct lvts_domain *lvts_td
return 0;
}
-static int lvts_golden_temp_init(struct device *dev, u32 *value, int temp_offset)
+static int lvts_golden_temp_init(struct device *dev, u8 *calib,
+ const struct lvts_data *lvts_data)
{
u32 gt;
- gt = (*value) >> 24;
+ /*
+ * The golden temp information is contained in the first 32-bit
+ * word of efuse data at a specific bit offset.
+ */
+ gt = (((u32 *)calib)[0] >> lvts_data->gt_calib_bit_offset) & 0xff;
if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
golden_temp = gt;
- golden_temp_offset = golden_temp * 500 + temp_offset;
+ golden_temp_offset = golden_temp * 500 + lvts_data->temp_offset;
return 0;
}
@@ -762,11 +790,7 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
if (ret)
return ret;
- /*
- * The golden temp information is contained in the first chunk
- * of efuse data.
- */
- ret = lvts_golden_temp_init(dev, (u32 *)lvts_td->calib, lvts_data->temp_offset);
+ ret = lvts_golden_temp_init(dev, lvts_td->calib, lvts_data);
if (ret)
return ret;
@@ -786,7 +810,8 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
ret = lvts_calibration_init(dev, &lvts_ctrl[i],
&lvts_data->lvts_ctrl[i],
- lvts_td->calib);
+ lvts_td->calib,
+ lvts_td->calib_len);
if (ret)
return ret;
@@ -801,7 +826,7 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
* after initializing the calibration.
*/
lvts_ctrl[i].hw_tshut_raw_temp =
- lvts_temp_to_raw(lvts_data->lvts_ctrl[i].hw_tshut_temp,
+ lvts_temp_to_raw(LVTS_HW_TSHUT_TEMP,
lvts_data->temp_factor);
lvts_ctrl[i].low_thresh = INT_MIN;
@@ -1089,7 +1114,7 @@ static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
u32 *sensor_bitmap = lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE ?
sensor_imm_bitmap : sensor_filt_bitmap;
- for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++) {
+ lvts_for_each_valid_sensor(i, lvts_ctrl->lvts_data->lvts_ctrl) {
int dt_id = lvts_sensors[i].dt_id;
@@ -1302,28 +1327,32 @@ static void lvts_remove(struct platform_device *pdev)
static const struct lvts_ctrl_data mt7988_lvts_ap_data_ctrl[] = {
{
- .cal_offset = { 0x00, 0x04, 0x08, 0x0c },
.lvts_sensor = {
- { .dt_id = MT7988_CPU_0 },
- { .dt_id = MT7988_CPU_1 },
- { .dt_id = MT7988_ETH2P5G_0 },
- { .dt_id = MT7988_ETH2P5G_1 }
+ { .dt_id = MT7988_CPU_0,
+ .cal_offsets = { 0x00, 0x01, 0x02 } },
+ { .dt_id = MT7988_CPU_1,
+ .cal_offsets = { 0x04, 0x05, 0x06 } },
+ { .dt_id = MT7988_ETH2P5G_0,
+ .cal_offsets = { 0x08, 0x09, 0x0a } },
+ { .dt_id = MT7988_ETH2P5G_1,
+ .cal_offsets = { 0x0c, 0x0d, 0x0e } }
},
- .num_lvts_sensor = 4,
+ VALID_SENSOR_MAP(1, 1, 1, 1),
.offset = 0x0,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT7988,
},
{
- .cal_offset = { 0x14, 0x18, 0x1c, 0x20 },
.lvts_sensor = {
- { .dt_id = MT7988_TOPS_0},
- { .dt_id = MT7988_TOPS_1},
- { .dt_id = MT7988_ETHWARP_0},
- { .dt_id = MT7988_ETHWARP_1}
+ { .dt_id = MT7988_TOPS_0,
+ .cal_offsets = { 0x14, 0x15, 0x16 } },
+ { .dt_id = MT7988_TOPS_1,
+ .cal_offsets = { 0x18, 0x19, 0x1a } },
+ { .dt_id = MT7988_ETHWARP_0,
+ .cal_offsets = { 0x1c, 0x1d, 0x1e } },
+ { .dt_id = MT7988_ETHWARP_1,
+ .cal_offsets = { 0x20, 0x21, 0x22 } }
},
- .num_lvts_sensor = 4,
+ VALID_SENSOR_MAP(1, 1, 1, 1),
.offset = 0x100,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT7988,
}
};
@@ -1359,164 +1388,301 @@ static int lvts_resume(struct device *dev)
return 0;
}
+/*
+ * The MT8186 calibration data is stored as packed 3-byte little-endian
+ * values using a weird layout that makes sense only when viewed as a 32-bit
+ * hexadecimal word dump. Let's denote a byte SxBy, where x is the sensor
+ * number and y the byte number, y = 0 being the LSB. We then have:
+ *
+ * [S0B2-S0B1-S0B0-S1B2] [S1B1-S1B0-S2B2-S2B1] [S2B0-S3B2-S3B1-S3B0]
+ *
+ * However, when considering a byte stream, those appear as follows:
+ *
+ * [S1B2] [S0B0] [S0B1] [S0B2] [S2B1] [S2B2] [S1B0] [S1B1] [S3B0] [S3B1] [S3B2] [S2B0]
+ *
+ * Hence the rather confusing offsets provided below.
+ */
+static const struct lvts_ctrl_data mt8186_lvts_data_ctrl[] = {
+ {
+ .lvts_sensor = {
+ { .dt_id = MT8186_LITTLE_CPU0,
+ .cal_offsets = { 5, 6, 7 } },
+ { .dt_id = MT8186_LITTLE_CPU1,
+ .cal_offsets = { 10, 11, 4 } },
+ { .dt_id = MT8186_LITTLE_CPU2,
+ .cal_offsets = { 15, 8, 9 } },
+ { .dt_id = MT8186_CAM,
+ .cal_offsets = { 12, 13, 14 } }
+ },
+ VALID_SENSOR_MAP(1, 1, 1, 1),
+ .offset = 0x0,
+ },
+ {
+ .lvts_sensor = {
+ { .dt_id = MT8186_BIG_CPU0,
+ .cal_offsets = { 22, 23, 16 } },
+ { .dt_id = MT8186_BIG_CPU1,
+ .cal_offsets = { 27, 20, 21 } }
+ },
+ VALID_SENSOR_MAP(1, 1, 0, 0),
+ .offset = 0x100,
+ },
+ {
+ .lvts_sensor = {
+ { .dt_id = MT8186_NNA,
+ .cal_offsets = { 29, 30, 31 } },
+ { .dt_id = MT8186_ADSP,
+ .cal_offsets = { 34, 35, 28 } },
+ { .dt_id = MT8186_MFG,
+ .cal_offsets = { 39, 32, 33 } }
+ },
+ VALID_SENSOR_MAP(1, 1, 1, 0),
+ .offset = 0x200,
+ }
+};
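As a sanity check of the offsets above, a sketch assuming each sensor's 24-bit value is assembled LSB-first from its three cal_offsets bytes (the helper name is hypothetical):

static u32 lvts_sensor_calib_value(const u8 *calib, const u8 offsets[3])
{
	/* B0 | B1 << 8 | B2 << 16, straight from the efuse byte stream */
	return calib[offsets[0]] |
	       (calib[offsets[1]] << 8) |
	       (calib[offsets[2]] << 16);
}

With MT8186_LITTLE_CPU1's offsets { 10, 11, 4 } and the stream starting right after the 4-byte golden-temp word, bytes 10/11/4 land on S1B0/S1B1/S1B2, so the value comes out as S1B2:S1B1:S1B0 as intended.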
+
+static const struct lvts_ctrl_data mt8188_lvts_mcu_data_ctrl[] = {
+ {
+ .lvts_sensor = {
+ { .dt_id = MT8188_MCU_LITTLE_CPU0,
+ .cal_offsets = { 22, 23, 24 } },
+ { .dt_id = MT8188_MCU_LITTLE_CPU1,
+ .cal_offsets = { 25, 26, 27 } },
+ { .dt_id = MT8188_MCU_LITTLE_CPU2,
+ .cal_offsets = { 28, 29, 30 } },
+ { .dt_id = MT8188_MCU_LITTLE_CPU3,
+ .cal_offsets = { 31, 32, 33 } },
+ },
+ VALID_SENSOR_MAP(1, 1, 1, 1),
+ .offset = 0x0,
+ .mode = LVTS_MSR_FILTERED_MODE,
+ },
+ {
+ .lvts_sensor = {
+ { .dt_id = MT8188_MCU_BIG_CPU0,
+ .cal_offsets = { 34, 35, 36 } },
+ { .dt_id = MT8188_MCU_BIG_CPU1,
+ .cal_offsets = { 37, 38, 39 } },
+ },
+ VALID_SENSOR_MAP(1, 1, 0, 0),
+ .offset = 0x100,
+ .mode = LVTS_MSR_FILTERED_MODE,
+ }
+};
+
+static const struct lvts_ctrl_data mt8188_lvts_ap_data_ctrl[] = {
+ {
+ .lvts_sensor = {
+ { /* unused */ },
+ { .dt_id = MT8188_AP_APU,
+ .cal_offsets = { 40, 41, 42 } },
+ },
+ VALID_SENSOR_MAP(0, 1, 0, 0),
+ .offset = 0x0,
+ .mode = LVTS_MSR_FILTERED_MODE,
+ },
+ {
+ .lvts_sensor = {
+ { .dt_id = MT8188_AP_GPU1,
+ .cal_offsets = { 43, 44, 45 } },
+ { .dt_id = MT8188_AP_GPU2,
+ .cal_offsets = { 46, 47, 48 } },
+ { .dt_id = MT8188_AP_SOC1,
+ .cal_offsets = { 49, 50, 51 } },
+ },
+ VALID_SENSOR_MAP(1, 1, 1, 0),
+ .offset = 0x100,
+ .mode = LVTS_MSR_FILTERED_MODE,
+ },
+ {
+ .lvts_sensor = {
+ { .dt_id = MT8188_AP_SOC2,
+ .cal_offsets = { 52, 53, 54 } },
+ { .dt_id = MT8188_AP_SOC3,
+ .cal_offsets = { 55, 56, 57 } },
+ },
+ VALID_SENSOR_MAP(1, 1, 0, 0),
+ .offset = 0x200,
+ .mode = LVTS_MSR_FILTERED_MODE,
+ },
+ {
+ .lvts_sensor = {
+ { .dt_id = MT8188_AP_CAM1,
+ .cal_offsets = { 58, 59, 60 } },
+ { .dt_id = MT8188_AP_CAM2,
+ .cal_offsets = { 61, 62, 63 } },
+ },
+ VALID_SENSOR_MAP(1, 1, 0, 0),
+ .offset = 0x300,
+ .mode = LVTS_MSR_FILTERED_MODE,
+ }
+};
+
static const struct lvts_ctrl_data mt8192_lvts_mcu_data_ctrl[] = {
{
- .cal_offset = { 0x04, 0x08 },
.lvts_sensor = {
- { .dt_id = MT8192_MCU_BIG_CPU0 },
- { .dt_id = MT8192_MCU_BIG_CPU1 }
+ { .dt_id = MT8192_MCU_BIG_CPU0,
+ .cal_offsets = { 0x04, 0x05, 0x06 } },
+ { .dt_id = MT8192_MCU_BIG_CPU1,
+ .cal_offsets = { 0x08, 0x09, 0x0a } }
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x0,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8192,
.mode = LVTS_MSR_FILTERED_MODE,
},
{
- .cal_offset = { 0x0c, 0x10 },
.lvts_sensor = {
- { .dt_id = MT8192_MCU_BIG_CPU2 },
- { .dt_id = MT8192_MCU_BIG_CPU3 }
+ { .dt_id = MT8192_MCU_BIG_CPU2,
+ .cal_offsets = { 0x0c, 0x0d, 0x0e } },
+ { .dt_id = MT8192_MCU_BIG_CPU3,
+ .cal_offsets = { 0x10, 0x11, 0x12 } }
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x100,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8192,
.mode = LVTS_MSR_FILTERED_MODE,
},
{
- .cal_offset = { 0x14, 0x18, 0x1c, 0x20 },
.lvts_sensor = {
- { .dt_id = MT8192_MCU_LITTLE_CPU0 },
- { .dt_id = MT8192_MCU_LITTLE_CPU1 },
- { .dt_id = MT8192_MCU_LITTLE_CPU2 },
- { .dt_id = MT8192_MCU_LITTLE_CPU3 }
+ { .dt_id = MT8192_MCU_LITTLE_CPU0,
+ .cal_offsets = { 0x14, 0x15, 0x16 } },
+ { .dt_id = MT8192_MCU_LITTLE_CPU1,
+ .cal_offsets = { 0x18, 0x19, 0x1a } },
+ { .dt_id = MT8192_MCU_LITTLE_CPU2,
+ .cal_offsets = { 0x1c, 0x1d, 0x1e } },
+ { .dt_id = MT8192_MCU_LITTLE_CPU3,
+ .cal_offsets = { 0x20, 0x21, 0x22 } }
},
- .num_lvts_sensor = 4,
+ VALID_SENSOR_MAP(1, 1, 1, 1),
.offset = 0x200,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8192,
.mode = LVTS_MSR_FILTERED_MODE,
}
};
static const struct lvts_ctrl_data mt8192_lvts_ap_data_ctrl[] = {
- {
- .cal_offset = { 0x24, 0x28 },
+ {
.lvts_sensor = {
- { .dt_id = MT8192_AP_VPU0 },
- { .dt_id = MT8192_AP_VPU1 }
+ { .dt_id = MT8192_AP_VPU0,
+ .cal_offsets = { 0x24, 0x25, 0x26 } },
+ { .dt_id = MT8192_AP_VPU1,
+ .cal_offsets = { 0x28, 0x29, 0x2a } }
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x0,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8192,
},
{
- .cal_offset = { 0x2c, 0x30 },
.lvts_sensor = {
- { .dt_id = MT8192_AP_GPU0 },
- { .dt_id = MT8192_AP_GPU1 }
+ { .dt_id = MT8192_AP_GPU0,
+ .cal_offsets = { 0x2c, 0x2d, 0x2e } },
+ { .dt_id = MT8192_AP_GPU1,
+ .cal_offsets = { 0x30, 0x31, 0x32 } }
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x100,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8192,
},
{
- .cal_offset = { 0x34, 0x38 },
.lvts_sensor = {
- { .dt_id = MT8192_AP_INFRA },
- { .dt_id = MT8192_AP_CAM },
+ { .dt_id = MT8192_AP_INFRA,
+ .cal_offsets = { 0x34, 0x35, 0x36 } },
+ { .dt_id = MT8192_AP_CAM,
+ .cal_offsets = { 0x38, 0x39, 0x3a } },
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x200,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8192,
},
{
- .cal_offset = { 0x3c, 0x40, 0x44 },
.lvts_sensor = {
- { .dt_id = MT8192_AP_MD0 },
- { .dt_id = MT8192_AP_MD1 },
- { .dt_id = MT8192_AP_MD2 }
+ { .dt_id = MT8192_AP_MD0,
+ .cal_offsets = { 0x3c, 0x3d, 0x3e } },
+ { .dt_id = MT8192_AP_MD1,
+ .cal_offsets = { 0x40, 0x41, 0x42 } },
+ { .dt_id = MT8192_AP_MD2,
+ .cal_offsets = { 0x44, 0x45, 0x46 } }
},
- .num_lvts_sensor = 3,
+ VALID_SENSOR_MAP(1, 1, 1, 0),
.offset = 0x300,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8192,
}
};
static const struct lvts_ctrl_data mt8195_lvts_mcu_data_ctrl[] = {
{
- .cal_offset = { 0x04, 0x07 },
.lvts_sensor = {
- { .dt_id = MT8195_MCU_BIG_CPU0 },
- { .dt_id = MT8195_MCU_BIG_CPU1 }
+ { .dt_id = MT8195_MCU_BIG_CPU0,
+ .cal_offsets = { 0x04, 0x05, 0x06 } },
+ { .dt_id = MT8195_MCU_BIG_CPU1,
+ .cal_offsets = { 0x07, 0x08, 0x09 } }
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x0,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
- .cal_offset = { 0x0d, 0x10 },
.lvts_sensor = {
- { .dt_id = MT8195_MCU_BIG_CPU2 },
- { .dt_id = MT8195_MCU_BIG_CPU3 }
+ { .dt_id = MT8195_MCU_BIG_CPU2,
+ .cal_offsets = { 0x0d, 0x0e, 0x0f } },
+ { .dt_id = MT8195_MCU_BIG_CPU3,
+ .cal_offsets = { 0x10, 0x11, 0x12 } }
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x100,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
- .cal_offset = { 0x16, 0x19, 0x1c, 0x1f },
.lvts_sensor = {
- { .dt_id = MT8195_MCU_LITTLE_CPU0 },
- { .dt_id = MT8195_MCU_LITTLE_CPU1 },
- { .dt_id = MT8195_MCU_LITTLE_CPU2 },
- { .dt_id = MT8195_MCU_LITTLE_CPU3 }
+ { .dt_id = MT8195_MCU_LITTLE_CPU0,
+ .cal_offsets = { 0x16, 0x17, 0x18 } },
+ { .dt_id = MT8195_MCU_LITTLE_CPU1,
+ .cal_offsets = { 0x19, 0x1a, 0x1b } },
+ { .dt_id = MT8195_MCU_LITTLE_CPU2,
+ .cal_offsets = { 0x1c, 0x1d, 0x1e } },
+ { .dt_id = MT8195_MCU_LITTLE_CPU3,
+ .cal_offsets = { 0x1f, 0x20, 0x21 } }
},
- .num_lvts_sensor = 4,
+ VALID_SENSOR_MAP(1, 1, 1, 1),
.offset = 0x200,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
}
};
static const struct lvts_ctrl_data mt8195_lvts_ap_data_ctrl[] = {
- {
- .cal_offset = { 0x25, 0x28 },
+ {
.lvts_sensor = {
- { .dt_id = MT8195_AP_VPU0 },
- { .dt_id = MT8195_AP_VPU1 }
+ { .dt_id = MT8195_AP_VPU0,
+ .cal_offsets = { 0x25, 0x26, 0x27 } },
+ { .dt_id = MT8195_AP_VPU1,
+ .cal_offsets = { 0x28, 0x29, 0x2a } }
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x0,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
- .cal_offset = { 0x2e, 0x31 },
.lvts_sensor = {
- { .dt_id = MT8195_AP_GPU0 },
- { .dt_id = MT8195_AP_GPU1 }
+ { .dt_id = MT8195_AP_GPU0,
+ .cal_offsets = { 0x2e, 0x2f, 0x30 } },
+ { .dt_id = MT8195_AP_GPU1,
+ .cal_offsets = { 0x31, 0x32, 0x33 } }
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x100,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
- .cal_offset = { 0x37, 0x3a, 0x3d },
.lvts_sensor = {
- { .dt_id = MT8195_AP_VDEC },
- { .dt_id = MT8195_AP_IMG },
- { .dt_id = MT8195_AP_INFRA },
+ { .dt_id = MT8195_AP_VDEC,
+ .cal_offsets = { 0x37, 0x38, 0x39 } },
+ { .dt_id = MT8195_AP_IMG,
+ .cal_offsets = { 0x3a, 0x3b, 0x3c } },
+ { .dt_id = MT8195_AP_INFRA,
+ .cal_offsets = { 0x3d, 0x3e, 0x3f } }
},
- .num_lvts_sensor = 3,
+ VALID_SENSOR_MAP(1, 1, 1, 0),
.offset = 0x200,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
},
{
- .cal_offset = { 0x43, 0x46 },
.lvts_sensor = {
- { .dt_id = MT8195_AP_CAM0 },
- { .dt_id = MT8195_AP_CAM1 }
+ { .dt_id = MT8195_AP_CAM0,
+ .cal_offsets = { 0x43, 0x44, 0x45 } },
+ { .dt_id = MT8195_AP_CAM1,
+ .cal_offsets = { 0x46, 0x47, 0x48 } }
},
- .num_lvts_sensor = 2,
+ VALID_SENSOR_MAP(1, 1, 0, 0),
.offset = 0x300,
- .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
}
};
@@ -1525,16 +1691,47 @@ static const struct lvts_data mt7988_lvts_ap_data = {
.num_lvts_ctrl = ARRAY_SIZE(mt7988_lvts_ap_data_ctrl),
.temp_factor = LVTS_COEFF_A_MT7988,
.temp_offset = LVTS_COEFF_B_MT7988,
+ .gt_calib_bit_offset = 24,
+};
+
+static const struct lvts_data mt8186_lvts_data = {
+ .lvts_ctrl = mt8186_lvts_data_ctrl,
+ .num_lvts_ctrl = ARRAY_SIZE(mt8186_lvts_data_ctrl),
+ .temp_factor = LVTS_COEFF_A_MT7988,
+ .temp_offset = LVTS_COEFF_B_MT7988,
+ .gt_calib_bit_offset = 24,
+};
+
+static const struct lvts_data mt8188_lvts_mcu_data = {
+ .lvts_ctrl = mt8188_lvts_mcu_data_ctrl,
+ .num_lvts_ctrl = ARRAY_SIZE(mt8188_lvts_mcu_data_ctrl),
+ .temp_factor = LVTS_COEFF_A_MT8195,
+ .temp_offset = LVTS_COEFF_B_MT8195,
+ .gt_calib_bit_offset = 20,
+};
+
+static const struct lvts_data mt8188_lvts_ap_data = {
+ .lvts_ctrl = mt8188_lvts_ap_data_ctrl,
+ .num_lvts_ctrl = ARRAY_SIZE(mt8188_lvts_ap_data_ctrl),
+ .temp_factor = LVTS_COEFF_A_MT8195,
+ .temp_offset = LVTS_COEFF_B_MT8195,
+ .gt_calib_bit_offset = 20,
};
static const struct lvts_data mt8192_lvts_mcu_data = {
.lvts_ctrl = mt8192_lvts_mcu_data_ctrl,
.num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_mcu_data_ctrl),
+ .temp_factor = LVTS_COEFF_A_MT8195,
+ .temp_offset = LVTS_COEFF_B_MT8195,
+ .gt_calib_bit_offset = 24,
};
static const struct lvts_data mt8192_lvts_ap_data = {
.lvts_ctrl = mt8192_lvts_ap_data_ctrl,
.num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_ap_data_ctrl),
+ .temp_factor = LVTS_COEFF_A_MT8195,
+ .temp_offset = LVTS_COEFF_B_MT8195,
+ .gt_calib_bit_offset = 24,
};
static const struct lvts_data mt8195_lvts_mcu_data = {
@@ -1542,6 +1739,7 @@ static const struct lvts_data mt8195_lvts_mcu_data = {
.num_lvts_ctrl = ARRAY_SIZE(mt8195_lvts_mcu_data_ctrl),
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
+ .gt_calib_bit_offset = 24,
};
static const struct lvts_data mt8195_lvts_ap_data = {
@@ -1549,10 +1747,14 @@ static const struct lvts_data mt8195_lvts_ap_data = {
.num_lvts_ctrl = ARRAY_SIZE(mt8195_lvts_ap_data_ctrl),
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
+ .gt_calib_bit_offset = 24,
};
static const struct of_device_id lvts_of_match[] = {
{ .compatible = "mediatek,mt7988-lvts-ap", .data = &mt7988_lvts_ap_data },
+ { .compatible = "mediatek,mt8186-lvts", .data = &mt8186_lvts_data },
+ { .compatible = "mediatek,mt8188-lvts-mcu", .data = &mt8188_lvts_mcu_data },
+ { .compatible = "mediatek,mt8188-lvts-ap", .data = &mt8188_lvts_ap_data },
{ .compatible = "mediatek,mt8192-lvts-mcu", .data = &mt8192_lvts_mcu_data },
{ .compatible = "mediatek,mt8192-lvts-ap", .data = &mt8192_lvts_ap_data },
{ .compatible = "mediatek,mt8195-lvts-mcu", .data = &mt8195_lvts_mcu_data },
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index f6edb12ec004..5225b3621a56 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -95,6 +95,9 @@ static int lmh_probe(struct platform_device *pdev)
unsigned int enable_alg;
u32 node_id;
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
if (!lmh_data)
return -ENOMEM;
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 78c5cfe6a0c0..3cd74f6cac8f 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -74,7 +74,6 @@ struct qpnp_tm_chip {
long temp;
unsigned int thresh;
unsigned int stage;
- unsigned int prev_stage;
unsigned int base;
/* protects .thresh, .stage and chip registers */
struct mutex lock;
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 29a61d2d6ca3..0cb7301eca6e 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -107,6 +107,7 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
static const struct tsens_ops ops_generic_v2 = {
.init = init_common,
.get_temp = get_temp_tsens_valid,
+ .resume = tsens_resume_common,
};
struct tsens_plat_data data_tsens_v2 = {
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 6d7c16ccb44d..e76e23026dc8 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -17,6 +17,7 @@
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <linux/thermal.h>
#include "../thermal_hwmon.h"
#include "tsens.h"
@@ -264,7 +265,7 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
for (i = 0; i < priv->num_sensors; i++) {
dev_dbg(priv->dev,
"%s: sensor%d - data_point1:%#x data_point2:%#x\n",
- __func__, i, p1[i], p2[i]);
+ __func__, i, p1[i], p2 ? p2[i] : 0);
if (!priv->sensor[i].slope)
priv->sensor[i].slope = SLOPE_DEFAULT;
@@ -1193,6 +1194,36 @@ static int tsens_register_irq(struct tsens_priv *priv, char *irqname,
return ret;
}
+#ifdef CONFIG_SUSPEND
+static int tsens_reinit(struct tsens_priv *priv)
+{
+ if (tsens_version(priv) >= VER_2_X) {
+ /*
+ * Re-enable the watchdog, unmask the bark.
+ * Disable cycle completion monitoring
+ */
+ if (priv->feat->has_watchdog) {
+ regmap_field_write(priv->rf[WDOG_BARK_MASK], 0);
+ regmap_field_write(priv->rf[CC_MON_MASK], 1);
+ }
+
+ /* Re-enable interrupts */
+ tsens_enable_irq(priv);
+ }
+
+ return 0;
+}
+
+int tsens_resume_common(struct tsens_priv *priv)
+{
+ if (pm_suspend_target_state == PM_SUSPEND_MEM)
+ tsens_reinit(priv);
+
+ return 0;
+}
+
+#endif /* CONFIG_SUSPEND */
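The PM_SUSPEND_MEM check restricts the reinit to suspend-to-RAM, where the TSENS registers lose their state; suspend-to-idle leaves them programmed. A hedged sketch of how the per-version op might be dispatched from the driver's dev_pm_ops (the hook name and wiring are assumptions, not shown in this hunk):

static int tsens_pm_resume(struct device *dev)
{
	struct tsens_priv *priv = dev_get_drvdata(dev);

	/* ops_generic_v2 sets .resume = tsens_resume_common; NULL elsewhere */
	if (priv->ops && priv->ops->resume)
		return priv->ops->resume(priv);

	return 0;
}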
+
static int tsens_register(struct tsens_priv *priv)
{
int i, ret;
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index cb637fa289ca..cab39de045b1 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -634,6 +634,11 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mo
int init_common(struct tsens_priv *priv);
int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp);
int get_temp_common(const struct tsens_sensor *s, int *temp);
+#ifdef CONFIG_SUSPEND
+int tsens_resume_common(struct tsens_priv *priv);
+#else
+#define tsens_resume_common NULL
+#endif
/* TSENS target */
extern struct tsens_plat_data data_8960;
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index a764cb1115a5..02494fa142c3 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -65,26 +65,29 @@
#define TSC_MAX_NUM 5
-/* Structure for thermal temperature calculation */
-struct equation_coefs {
- int a1;
- int b1;
- int a2;
- int b2;
-};
-
struct rcar_gen3_thermal_priv;
struct rcar_thermal_info {
- int ths_tj_1;
+ int scale;
+ int adj_below;
+ int adj_above;
void (*read_fuses)(struct rcar_gen3_thermal_priv *priv);
};
+struct equation_set_coef {
+ int a;
+ int b;
+};
+
struct rcar_gen3_thermal_tsc {
+ struct rcar_gen3_thermal_priv *priv;
void __iomem *base;
struct thermal_zone_device *zone;
- struct equation_coefs coef;
- int tj_t;
+ /* Different coefficients are used below and above a threshold. */
+ struct {
+ struct equation_set_coef below;
+ struct equation_set_coef above;
+ } coef;
int thcode[3];
};
@@ -93,6 +96,7 @@ struct rcar_gen3_thermal_priv {
struct thermal_zone_device_ops ops;
unsigned int num_tscs;
int ptat[3];
+ int tj_t;
const struct rcar_thermal_info *info;
};
@@ -111,84 +115,75 @@ static inline void rcar_gen3_thermal_write(struct rcar_gen3_thermal_tsc *tsc,
/*
* Linear approximation for temperature
*
- * [reg] = [temp] * a + b => [temp] = ([reg] - b) / a
+ * [temp] = ((thadj - [reg]) * a) / b + adj
+ * [reg] = thadj - ([temp] - adj) * b / a
*
* The constants a and b are calculated using two triplets of int values PTAT
* and THCODE. PTAT and THCODE can either be read from hardware or use hard
- * coded values from driver. The formula to calculate a and b are taken from
- * BSP and sparsely documented and understood.
+ * coded values from the driver. The formulas to calculate a and b are taken
+ * from the datasheet. Different calculations are needed for a and b depending
+ * on whether the input variables ([temp] or [reg]) are above or below a
+ * threshold. The
+ * threshold is also calculated from PTAT and THCODE using formulas from the
+ * datasheet.
+ *
+ * The constant thadj is one of the THCODE values; which one to use depends
+ * on the threshold and the input value.
*
- * Examining the linear formula and the formula used to calculate constants a
- * and b while knowing that the span for PTAT and THCODE values are between
- * 0x000 and 0xfff the largest integer possible is 0xfff * 0xfff == 0xffe001.
- * Integer also needs to be signed so that leaves 7 bits for binary
- * fixed point scaling.
+ * The constant adj is taken verbatim from the datasheet. Two values exist;
+ * which one to use depends on the input value and the calculated threshold.
+ * Furthermore, different SoC models supported by the driver have different sets
+ * of values. The values for each model are stored in the device match data.
*/
-#define FIXPT_SHIFT 7
-#define FIXPT_INT(_x) ((_x) << FIXPT_SHIFT)
-#define INT_FIXPT(_x) ((_x) >> FIXPT_SHIFT)
-#define FIXPT_DIV(_a, _b) DIV_ROUND_CLOSEST(((_a) << FIXPT_SHIFT), (_b))
-#define FIXPT_TO_MCELSIUS(_x) ((_x) * 1000 >> FIXPT_SHIFT)
-
-#define RCAR3_THERMAL_GRAN 500 /* mili Celsius */
-
-/* no idea where these constants come from */
-#define TJ_3 -41
-
-static void rcar_gen3_thermal_calc_coefs(struct rcar_gen3_thermal_priv *priv,
- struct rcar_gen3_thermal_tsc *tsc,
- int ths_tj_1)
+static void rcar_gen3_thermal_shared_coefs(struct rcar_gen3_thermal_priv *priv)
{
- /* TODO: Find documentation and document constant calculation formula */
-
- /*
- * Division is not scaled in BSP and if scaled it might overflow
- * the dividend (4095 * 4095 << 14 > INT_MAX) so keep it unscaled
- */
- tsc->tj_t = (FIXPT_INT((priv->ptat[1] - priv->ptat[2]) * (ths_tj_1 - TJ_3))
- / (priv->ptat[0] - priv->ptat[2])) + FIXPT_INT(TJ_3);
-
- tsc->coef.a1 = FIXPT_DIV(FIXPT_INT(tsc->thcode[1] - tsc->thcode[2]),
- tsc->tj_t - FIXPT_INT(TJ_3));
- tsc->coef.b1 = FIXPT_INT(tsc->thcode[2]) - tsc->coef.a1 * TJ_3;
-
- tsc->coef.a2 = FIXPT_DIV(FIXPT_INT(tsc->thcode[1] - tsc->thcode[0]),
- tsc->tj_t - FIXPT_INT(ths_tj_1));
- tsc->coef.b2 = FIXPT_INT(tsc->thcode[0]) - tsc->coef.a2 * ths_tj_1;
+ priv->tj_t =
+ DIV_ROUND_CLOSEST((priv->ptat[1] - priv->ptat[2]) * priv->info->scale,
+ priv->ptat[0] - priv->ptat[2])
+ + priv->info->adj_below;
}
-
-static int rcar_gen3_thermal_round(int temp)
+static void rcar_gen3_thermal_tsc_coefs(struct rcar_gen3_thermal_priv *priv,
+ struct rcar_gen3_thermal_tsc *tsc)
{
- int result, round_offs;
+ tsc->coef.below.a = priv->info->scale * (priv->ptat[2] - priv->ptat[1]);
+ tsc->coef.above.a = priv->info->scale * (priv->ptat[0] - priv->ptat[1]);
- round_offs = temp >= 0 ? RCAR3_THERMAL_GRAN / 2 :
- -RCAR3_THERMAL_GRAN / 2;
- result = (temp + round_offs) / RCAR3_THERMAL_GRAN;
- return result * RCAR3_THERMAL_GRAN;
+ tsc->coef.below.b = (priv->ptat[2] - priv->ptat[0]) * (tsc->thcode[2] - tsc->thcode[1]);
+ tsc->coef.above.b = (priv->ptat[0] - priv->ptat[2]) * (tsc->thcode[1] - tsc->thcode[0]);
}
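To see the shared/per-TSC split in numbers, take illustrative (not fused) values ptat = { 2631, 1509, 435 } and thcode = { 3397, 2800, 2221 } with the gen3 info below (scale 167, adj -41/126):

	tj_t         = (1509 - 435) * 167 / (2631 - 435) - 41 = 82 - 41 = 41
	coef.above.a = 167 * (2631 - 1509)          =  187374
	coef.above.b = (2631 - 435) * (2800 - 3397) = -1311012

Reading back reg == thcode[1] (2800) then takes the "above" path: decicelsius = 187374 * (3397 - 2800) / -131101, about -853, hence -853 * 100 + 126 * 1000 = 40700 m°C, roughly 41 °C, matching tj_t at the threshold as expected.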
static int rcar_gen3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct rcar_gen3_thermal_tsc *tsc = thermal_zone_device_priv(tz);
- int mcelsius, val;
- int reg;
+ struct rcar_gen3_thermal_priv *priv = tsc->priv;
+ const struct equation_set_coef *coef;
+ int adj, decicelsius, reg, thcode;
/* Read register and convert to millidegree Celsius */
reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
- if (reg <= tsc->thcode[1])
- val = FIXPT_DIV(FIXPT_INT(reg) - tsc->coef.b1,
- tsc->coef.a1);
- else
- val = FIXPT_DIV(FIXPT_INT(reg) - tsc->coef.b2,
- tsc->coef.a2);
- mcelsius = FIXPT_TO_MCELSIUS(val);
+ if (reg < tsc->thcode[1]) {
+ adj = priv->info->adj_below;
+ coef = &tsc->coef.below;
+ thcode = tsc->thcode[2];
+ } else {
+ adj = priv->info->adj_above;
+ coef = &tsc->coef.above;
+ thcode = tsc->thcode[0];
+ }
+
+ /*
+ * The dividend can't be scaled up as it might overflow; instead, shorten
+ * the divisor to convert to decidegree Celsius. If we converted after the
+ * division, precision would be lost as we would scale up from whole
+ * degrees Celsius.
+ */
+ decicelsius = DIV_ROUND_CLOSEST(coef->a * (thcode - reg), coef->b / 10);
/* Guaranteed operating range is -40C to 125C. */
- /* Round value to device granularity setting */
- *temp = rcar_gen3_thermal_round(mcelsius);
+ /* Reporting is done in millidegree Celsius */
+ *temp = decicelsius * 100 + adj * 1000;
return 0;
}
@@ -196,15 +191,22 @@ static int rcar_gen3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc,
int mcelsius)
{
- int celsius, val;
+ struct rcar_gen3_thermal_priv *priv = tsc->priv;
+ const struct equation_set_coef *coef;
+ int adj, celsius, thcode;
celsius = DIV_ROUND_CLOSEST(mcelsius, 1000);
- if (celsius <= INT_FIXPT(tsc->tj_t))
- val = celsius * tsc->coef.a1 + tsc->coef.b1;
- else
- val = celsius * tsc->coef.a2 + tsc->coef.b2;
+ if (celsius < priv->tj_t) {
+ coef = &tsc->coef.below;
+ adj = priv->info->adj_below;
+ thcode = tsc->thcode[2];
+ } else {
+ coef = &tsc->coef.above;
+ adj = priv->info->adj_above;
+ thcode = tsc->thcode[0];
+ }
- return INT_FIXPT(val);
+ return thcode - DIV_ROUND_CLOSEST((celsius - adj) * coef->b, coef->a);
}
static int rcar_gen3_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
@@ -369,17 +371,23 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_priv *priv,
}
static const struct rcar_thermal_info rcar_m3w_thermal_info = {
- .ths_tj_1 = 116,
+ .scale = 157,
+ .adj_below = -41,
+ .adj_above = 116,
.read_fuses = rcar_gen3_thermal_read_fuses_gen3,
};
static const struct rcar_thermal_info rcar_gen3_thermal_info = {
- .ths_tj_1 = 126,
+ .scale = 167,
+ .adj_below = -41,
+ .adj_above = 126,
.read_fuses = rcar_gen3_thermal_read_fuses_gen3,
};
static const struct rcar_thermal_info rcar_gen4_thermal_info = {
- .ths_tj_1 = 126,
+ .scale = 167,
+ .adj_below = -41,
+ .adj_above = 126,
.read_fuses = rcar_gen3_thermal_read_fuses_gen4,
};
@@ -516,6 +524,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
goto error_unregister;
}
+ tsc->priv = priv;
tsc->base = devm_ioremap_resource(dev, res);
if (IS_ERR(tsc->base)) {
ret = PTR_ERR(tsc->base);
@@ -530,11 +539,13 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (!rcar_gen3_thermal_read_fuses(priv))
dev_info(dev, "No calibration values fused, fallback to driver values\n");
+ rcar_gen3_thermal_shared_coefs(priv);
+
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
rcar_gen3_thermal_init(priv, tsc);
- rcar_gen3_thermal_calc_coefs(priv, tsc, priv->info->ths_tj_1);
+ rcar_gen3_thermal_tsc_coefs(priv, tsc);
zone = devm_thermal_of_zone_register(dev, i, tsc, &priv->ops);
if (IS_ERR(zone)) {
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 34a31bc72023..11750a145d74 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
+#include <linux/list_sort.h>
#include <linux/thermal.h>
#include <linux/reboot.h>
#include <linux/string.h>
@@ -295,17 +296,18 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
{
if (tz->mode != THERMAL_DEVICE_ENABLED)
thermal_zone_device_set_polling(tz, 0);
- else if (tz->passive)
+ else if (tz->passive > 0)
thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies);
else if (tz->polling_delay_jiffies)
thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies);
}
-static void handle_non_critical_trips(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+static struct thermal_governor *thermal_get_tz_governor(struct thermal_zone_device *tz)
{
- tz->governor ? tz->governor->throttle(tz, trip) :
- def_governor->throttle(tz, trip);
+ if (tz->governor)
+ return tz->governor;
+
+ return def_governor;
}
void thermal_governor_update_tz(struct thermal_zone_device *tz,
@@ -348,10 +350,6 @@ void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz)
static void handle_critical_trips(struct thermal_zone_device *tz,
const struct thermal_trip *trip)
{
- /* If we have not crossed the trip_temp, we do not care. */
- if (trip->temperature <= 0 || tz->temperature < trip->temperature)
- return;
-
trace_thermal_zone_trip(tz, thermal_zone_trip_id(tz, trip), trip->type);
if (trip->type == THERMAL_TRIP_CRITICAL)
@@ -361,55 +359,60 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
}
static void handle_thermal_trip(struct thermal_zone_device *tz,
- struct thermal_trip *trip)
+ struct thermal_trip_desc *td,
+ struct list_head *way_up_list,
+ struct list_head *way_down_list)
{
+ const struct thermal_trip *trip = &td->trip;
+ int old_threshold;
+
if (trip->temperature == THERMAL_TEMP_INVALID)
return;
- if (tz->last_temperature == THERMAL_TEMP_INVALID) {
- /* Initialization. */
- trip->threshold = trip->temperature;
- if (tz->temperature >= trip->threshold)
- trip->threshold -= trip->hysteresis;
- } else if (tz->last_temperature < trip->threshold) {
+ /*
+ * If the trip temperature or hysteresis has been updated recently,
+ * the threshold needs to be computed again using the new values.
+ * However, its initial value still reflects the old ones and that
+ * is what needs to be compared with the previous zone temperature
+ * to decide which action to take.
+ */
+ old_threshold = td->threshold;
+ td->threshold = trip->temperature;
+
+ if (tz->last_temperature >= old_threshold &&
+ tz->last_temperature != THERMAL_TEMP_INVALID) {
/*
- * The trip threshold is equal to the trip temperature, unless
- * the latter has changed in the meantime. In either case,
- * the trip is crossed if the current zone temperature is at
- * least equal to its temperature, but otherwise ensure that
- * the threshold and the trip temperature will be equal.
+ * Mitigation is under way, so it needs to stop if the zone
+ * temperature falls below the low temperature of the trip.
+ * In that case, the trip temperature becomes the new threshold.
*/
- if (tz->temperature >= trip->temperature) {
- thermal_notify_tz_trip_up(tz, trip);
- thermal_debug_tz_trip_up(tz, trip);
- trip->threshold = trip->temperature - trip->hysteresis;
+ if (tz->temperature < trip->temperature - trip->hysteresis) {
+ list_add(&td->notify_list_node, way_down_list);
+ td->notify_temp = trip->temperature - trip->hysteresis;
+
+ if (trip->type == THERMAL_TRIP_PASSIVE) {
+ tz->passive--;
+ WARN_ON(tz->passive < 0);
+ }
} else {
- trip->threshold = trip->temperature;
+ td->threshold -= trip->hysteresis;
}
- } else {
+ } else if (tz->temperature >= trip->temperature) {
/*
- * The previous zone temperature was above or equal to the trip
- * threshold, which would be equal to the "low temperature" of
- * the trip (its temperature minus its hysteresis), unless the
- * trip temperature or hysteresis had changed. In either case,
- * the trip is crossed if the current zone temperature is below
- * the low temperature of the trip, but otherwise ensure that
- * the trip threshold will be equal to the low temperature of
- * the trip.
+ * There is no mitigation under way, so it needs to be started
+ * if the zone temperature exceeds the trip one. The new
+ * threshold is then set to the low temperature of the trip.
*/
- if (tz->temperature < trip->temperature - trip->hysteresis) {
- thermal_notify_tz_trip_down(tz, trip);
- thermal_debug_tz_trip_down(tz, trip);
- trip->threshold = trip->temperature;
- } else {
- trip->threshold = trip->temperature - trip->hysteresis;
- }
+ list_add_tail(&td->notify_list_node, way_up_list);
+ td->notify_temp = trip->temperature;
+ td->threshold -= trip->hysteresis;
+
+ if (trip->type == THERMAL_TRIP_PASSIVE)
+ tz->passive++;
+ else if (trip->type == THERMAL_TRIP_CRITICAL ||
+ trip->type == THERMAL_TRIP_HOT)
+ handle_critical_trips(tz, trip);
}
-
- if (trip->type == THERMAL_TRIP_CRITICAL || trip->type == THERMAL_TRIP_HOT)
- handle_critical_trips(tz, trip);
- else
- handle_non_critical_trips(tz, trip);
}
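A concrete pass through the logic above, for a passive trip at 75000 m°C with 5000 m°C hysteresis and a threshold starting at 75000:

	temp 76000: 76000 >= 75000 -> way_up, notify_temp = 75000,
		    threshold = 70000, tz->passive++
	temp 72000: last (76000) >= old threshold (70000) and 72000 >= 70000
		    -> mitigation continues, threshold stays 70000
	temp 69000: 69000 < 70000 -> way_down, notify_temp = 70000,
		    threshold back to 75000, tz->passive--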
static void update_temperature(struct thermal_zone_device *tz)
@@ -431,7 +434,6 @@ static void update_temperature(struct thermal_zone_device *tz)
trace_thermal_temperature(tz);
thermal_genl_sampling_temp(tz->id, temp);
- thermal_debug_update_temp(tz);
}
static void thermal_zone_device_check(struct work_struct *work)
@@ -449,16 +451,41 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
tz->temperature = THERMAL_TEMP_INVALID;
+ tz->passive = 0;
tz->prev_low_trip = -INT_MAX;
tz->prev_high_trip = INT_MAX;
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
pos->initialized = false;
}
+static void thermal_governor_trip_crossed(struct thermal_governor *governor,
+ struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ bool crossed_up)
+{
+ if (governor->trip_crossed)
+ governor->trip_crossed(tz, trip, crossed_up);
+}
+
+static int thermal_trip_notify_cmp(void *ascending, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct thermal_trip_desc *tda = container_of(a, struct thermal_trip_desc,
+ notify_list_node);
+ struct thermal_trip_desc *tdb = container_of(b, struct thermal_trip_desc,
+ notify_list_node);
+ int ret = tdb->notify_temp - tda->notify_temp;
+
+ return ascending ? ret : -ret;
+}
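One comparator serves both lists: the priv argument flips the sign, so the way-up and way-down notifications are emitted in opposite orders of notify_temp. For instance, trips queued with notify temps { 60000, 90000, 75000 } come out as { 60000, 75000, 90000 } in one list and { 90000, 75000, 60000 } in the other.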
+
void __thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event)
{
- struct thermal_trip *trip;
+ struct thermal_governor *governor = thermal_get_tz_governor(tz);
+ struct thermal_trip_desc *td;
+ LIST_HEAD(way_down_list);
+ LIST_HEAD(way_up_list);
if (tz->suspended)
return;
@@ -468,12 +495,34 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
update_temperature(tz);
+ if (tz->temperature == THERMAL_TEMP_INVALID)
+ return;
+
__thermal_zone_set_trips(tz);
tz->notify_event = event;
- for_each_trip(tz, trip)
- handle_thermal_trip(tz, trip);
+ for_each_trip_desc(tz, td)
+ handle_thermal_trip(tz, td, &way_up_list, &way_down_list);
+
+ list_sort(&way_up_list, &way_up_list, thermal_trip_notify_cmp);
+ list_for_each_entry(td, &way_up_list, notify_list_node) {
+ thermal_notify_tz_trip_up(tz, &td->trip);
+ thermal_debug_tz_trip_up(tz, &td->trip);
+ thermal_governor_trip_crossed(governor, tz, &td->trip, true);
+ }
+
+ list_sort(NULL, &way_down_list, thermal_trip_notify_cmp);
+ list_for_each_entry(td, &way_down_list, notify_list_node) {
+ thermal_notify_tz_trip_down(tz, &td->trip);
+ thermal_debug_tz_trip_down(tz, &td->trip);
+ thermal_governor_trip_crossed(governor, tz, &td->trip, false);
+ }
+
+ if (governor->manage)
+ governor->manage(tz);
+
+ thermal_debug_update_trip_stats(tz);
monitor_thermal_zone(tz);
}
@@ -766,7 +815,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (trip_index < 0 || trip_index >= tz->num_trips)
return -EINVAL;
- return thermal_bind_cdev_to_trip(tz, &tz->trips[trip_index], cdev,
+ return thermal_bind_cdev_to_trip(tz, &tz->trips[trip_index].trip, cdev,
upper, lower, weight);
}
EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
@@ -825,7 +874,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
if (trip_index < 0 || trip_index >= tz->num_trips)
return -EINVAL;
- return thermal_unbind_cdev_from_trip(tz, &tz->trips[trip_index], cdev);
+ return thermal_unbind_cdev_from_trip(tz, &tz->trips[trip_index].trip, cdev);
}
EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device);
@@ -897,6 +946,7 @@ __thermal_cooling_device_register(struct device_node *np,
{
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
+ unsigned long current_state;
int id, ret;
if (!ops || !ops->get_max_state || !ops->get_cur_state ||
@@ -934,6 +984,10 @@ __thermal_cooling_device_register(struct device_node *np,
if (ret)
goto out_cdev_type;
+ ret = cdev->ops->get_cur_state(cdev, &current_state);
+ if (ret)
+ goto out_cdev_type;
+
thermal_cooling_device_setup_sysfs(cdev);
ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -947,6 +1001,8 @@ __thermal_cooling_device_register(struct device_node *np,
return ERR_PTR(ret);
}
+ thermal_debug_cdev_add(cdev, current_state);
+
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
@@ -962,8 +1018,6 @@ __thermal_cooling_device_register(struct device_node *np,
mutex_unlock(&thermal_list_lock);
- thermal_debug_cdev_add(cdev);
-
return cdev;
out_cooling_dev:
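Since get_cur_state is now called during registration to seed the debugfs statistics, it must report a sane state from the moment the ops are handed over. A hypothetical minimal ops set (the demo_* names and backing variable are assumptions):

static unsigned long demo_state;

static int demo_get_max_state(struct thermal_cooling_device *cdev,
			      unsigned long *state)
{
	*state = 3;			/* four throttle levels, 0..3 */
	return 0;
}

static int demo_get_cur_state(struct thermal_cooling_device *cdev,
			      unsigned long *state)
{
	*state = READ_ONCE(demo_state);	/* must be valid at registration */
	return 0;
}

static int demo_set_cur_state(struct thermal_cooling_device *cdev,
			      unsigned long state)
{
	WRITE_ONCE(demo_state, state);
	return 0;
}

static const struct thermal_cooling_device_ops demo_cdev_ops = {
	.get_max_state = demo_get_max_state,
	.get_cur_state = demo_get_cur_state,
	.set_cur_state = demo_set_cur_state,
};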
@@ -1221,16 +1275,19 @@ static void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms
int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp)
{
- int i, ret = -EINVAL;
+ const struct thermal_trip_desc *td;
+ int ret = -EINVAL;
if (tz->ops.get_crit_temp)
return tz->ops.get_crit_temp(tz, temp);
mutex_lock(&tz->lock);
- for (i = 0; i < tz->num_trips; i++) {
- if (tz->trips[i].type == THERMAL_TRIP_CRITICAL) {
- *temp = tz->trips[i].temperature;
+ for_each_trip_desc(tz, td) {
+ const struct thermal_trip *trip = &td->trip;
+
+ if (trip->type == THERMAL_TRIP_CRITICAL) {
+ *temp = trip->temperature;
ret = 0;
break;
}
@@ -1274,7 +1331,9 @@ thermal_zone_device_register_with_trips(const char *type,
const struct thermal_zone_params *tzp,
int passive_delay, int polling_delay)
{
+ const struct thermal_trip *trip = trips;
struct thermal_zone_device *tz;
+ struct thermal_trip_desc *td;
int id;
int result;
struct thermal_governor *governor;
@@ -1339,7 +1398,8 @@ thermal_zone_device_register_with_trips(const char *type,
tz->device.class = thermal_class;
tz->devdata = devdata;
tz->num_trips = num_trips;
- memcpy(tz->trips, trips, num_trips * sizeof(*trips));
+ for_each_trip_desc(tz, td)
+ td->trip = *trip++;
thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay);
thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 0d8a42bb7ce8..d9785e5bbb08 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -15,6 +15,120 @@
#include "thermal_netlink.h"
#include "thermal_debugfs.h"
+struct thermal_trip_desc {
+ struct thermal_trip trip;
+ struct list_head notify_list_node;
+ int notify_temp;
+ int threshold;
+};
+
+/**
+ * struct thermal_governor - structure that holds thermal governor information
+ * @name: name of the governor
+ * @bind_to_tz: callback called when binding to a thermal zone. If it
+ * returns 0, the governor is bound to the thermal zone,
+ * otherwise it fails.
+ * @unbind_from_tz: callback called when a governor is unbound from a
+ * thermal zone.
+ * @trip_crossed: called for trip points that have just been crossed
+ * @manage: called on thermal zone temperature updates
+ * @update_tz: callback called when thermal zone internals have changed, e.g.
+ * thermal cooling instance was added/removed
+ * @governor_list: node in thermal_governor_list (in thermal_core.c)
+ */
+struct thermal_governor {
+ const char *name;
+ int (*bind_to_tz)(struct thermal_zone_device *tz);
+ void (*unbind_from_tz)(struct thermal_zone_device *tz);
+ void (*trip_crossed)(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ bool crossed_up);
+ void (*manage)(struct thermal_zone_device *tz);
+ void (*update_tz)(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason);
+ struct list_head governor_list;
+};
+
+/**
+ * struct thermal_zone_device - structure for a thermal zone
+ * @id: unique id number for each thermal zone
+ * @type: the thermal zone device type
+ * @device: &struct device for this thermal zone
+ * @removal: removal completion
+ * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
+ * @trip_type_attrs: attributes for trip points for sysfs: trip type
+ * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
+ * @mode: current mode of this thermal zone
+ * @devdata: private pointer for device private data
+ * @num_trips: number of trip points the thermal zone supports
+ * @passive_delay_jiffies: number of jiffies to wait between polls when
+ * performing passive cooling.
+ * @polling_delay_jiffies: number of jiffies to wait between polls when
+ * checking whether trip points have been crossed (0 for
+ * interrupt driven systems)
+ * @temperature: current temperature. This is only for core code,
+ * drivers should use thermal_zone_get_temp() to get the
+ * current temperature
+ * @last_temperature: previous temperature read
+ * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION
+ * @passive: number of currently crossed passive trip points (0 if none)
+ * @prev_low_trip: the low current temperature if you've crossed a passive
+ * trip point.
+ * @prev_high_trip: the high current temperature if you've crossed a
+ * passive trip point.
+ * @need_update: if equals 1, thermal_zone_device_update needs to be invoked.
+ * @ops: operations this &thermal_zone_device supports
+ * @tzp: thermal zone parameters
+ * @governor: pointer to the governor for this thermal zone
+ * @governor_data: private pointer for governor data
+ * @thermal_instances: list of &struct thermal_instance of this thermal zone
+ * @ida: &struct ida to generate unique id for this zone's cooling
+ * devices
+ * @lock: lock to protect thermal_instances list
+ * @node: node in thermal_tz_list (in thermal_core.c)
+ * @poll_queue: delayed work for polling
+ * @notify_event: Last notification event
+ * @suspended: thermal zone suspend indicator
+ * @trips: array of struct thermal_trip_desc objects
+ */
+struct thermal_zone_device {
+ int id;
+ char type[THERMAL_NAME_LENGTH];
+ struct device device;
+ struct completion removal;
+ struct attribute_group trips_attribute_group;
+ struct thermal_attr *trip_temp_attrs;
+ struct thermal_attr *trip_type_attrs;
+ struct thermal_attr *trip_hyst_attrs;
+ enum thermal_device_mode mode;
+ void *devdata;
+ int num_trips;
+ unsigned long passive_delay_jiffies;
+ unsigned long polling_delay_jiffies;
+ int temperature;
+ int last_temperature;
+ int emul_temperature;
+ int passive;
+ int prev_low_trip;
+ int prev_high_trip;
+ atomic_t need_update;
+ struct thermal_zone_device_ops ops;
+ struct thermal_zone_params *tzp;
+ struct thermal_governor *governor;
+ void *governor_data;
+ struct list_head thermal_instances;
+ struct ida ida;
+ struct mutex lock;
+ struct list_head node;
+ struct delayed_work poll_queue;
+ enum thermal_notify_event notify_event;
+ bool suspended;
+#ifdef CONFIG_THERMAL_DEBUGFS
+ struct thermal_debugfs *debugfs;
+#endif
+ struct thermal_trip_desc trips[] __counted_by(num_trips);
+};
+
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
#define DEFAULT_THERMAL_GOVERNOR "step_wise"
@@ -120,8 +234,11 @@ void thermal_governor_update_tz(struct thermal_zone_device *tz,
enum thermal_notify_event reason);
/* Helpers */
-#define for_each_trip(__tz, __trip) \
- for (__trip = __tz->trips; __trip - __tz->trips < __tz->num_trips; __trip++)
+#define for_each_trip_desc(__tz, __td) \
+ for (__td = __tz->trips; __td - __tz->trips < __tz->num_trips; __td++)
+
+#define trip_to_trip_desc(__trip) \
+ container_of(__trip, struct thermal_trip_desc, trip)
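Typical use of the new helpers, mirroring the call sites elsewhere in this patch:

	struct thermal_trip_desc *td;

	for_each_trip_desc(tz, td) {
		const struct thermal_trip *trip = &td->trip;

		if (trip->temperature == THERMAL_TEMP_INVALID)
			continue;
		/* use trip->type, trip->temperature, td->threshold ... */
	}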
void __thermal_zone_set_trips(struct thermal_zone_device *tz);
int thermal_zone_trip_id(const struct thermal_zone_device *tz,
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index c617e8b9f0dd..91f9c21235a8 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -139,11 +139,13 @@ struct tz_episode {
* we keep track of the current position in the history array.
*
* @tz_episodes: a list of thermal mitigation episodes
+ * @tz: thermal zone this object belongs to
* @trips_crossed: an array of trip points crossed by id
* @nr_trips: the number of trip points currently being crossed
*/
struct tz_debugfs {
struct list_head tz_episodes;
+ struct thermal_zone_device *tz;
int *trips_crossed;
int nr_trips;
};
@@ -433,6 +435,14 @@ void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
}
cdev_dbg->current_state = new_state;
+
+ /*
+ * Create a record for the new state if it is not there, so its
+ * duration will be printed by cdev_dt_seq_show() as expected if it
+ * runs before the next state transition.
+ */
+ thermal_debugfs_cdev_record_get(thermal_dbg, cdev_dbg->durations, new_state);
+
transition = (old_state << 16) | new_state;
/*
@@ -458,8 +468,9 @@ void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
* Allocates a cooling device object for debug, initializes the
* statistics and create the entries in sysfs.
* @cdev: a pointer to a cooling device
+ * @state: current state of the cooling device
*/
-void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
+void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state)
{
struct thermal_debugfs *thermal_dbg;
struct cdev_debugfs *cdev_dbg;
@@ -476,9 +487,16 @@ void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
INIT_LIST_HEAD(&cdev_dbg->durations[i]);
}
- cdev_dbg->current_state = 0;
+ cdev_dbg->current_state = state;
cdev_dbg->timestamp = ktime_get();
+ /*
+ * Create a record for the initial cooling device state, so its
+ * duration will be printed by cdev_dt_seq_show() as expected if it
+ * runs before the first state transition.
+ */
+ thermal_debugfs_cdev_record_get(thermal_dbg, cdev_dbg->durations, state);
+
debugfs_create_file("trans_table", 0400, thermal_dbg->d_top,
thermal_dbg, &tt_fops);
@@ -503,15 +521,23 @@ void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
*/
void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev)
{
- struct thermal_debugfs *thermal_dbg = cdev->debugfs;
+ struct thermal_debugfs *thermal_dbg;
- if (!thermal_dbg)
+ mutex_lock(&cdev->lock);
+
+ thermal_dbg = cdev->debugfs;
+ if (!thermal_dbg) {
+ mutex_unlock(&cdev->lock);
return;
+ }
+
+ cdev->debugfs = NULL;
+
+ mutex_unlock(&cdev->lock);
mutex_lock(&thermal_dbg->lock);
thermal_debugfs_cdev_clear(&thermal_dbg->cdev_dbg);
- cdev->debugfs = NULL;
mutex_unlock(&thermal_dbg->lock);
@@ -530,6 +556,7 @@ static struct tz_episode *thermal_debugfs_tz_event_alloc(struct thermal_zone_dev
INIT_LIST_HEAD(&tze->node);
tze->timestamp = now;
+ tze->duration = KTIME_MIN;
for (i = 0; i < tz->num_trips; i++) {
tze->trip_stats[i].min = INT_MAX;
@@ -545,7 +572,6 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
struct tz_episode *tze;
struct tz_debugfs *tz_dbg;
struct thermal_debugfs *thermal_dbg = tz->debugfs;
- int temperature = tz->temperature;
int trip_id = thermal_zone_trip_id(tz, trip);
ktime_t now = ktime_get();
@@ -614,11 +640,6 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
tze->trip_stats[trip_id].timestamp = now;
- tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
- tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
- tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
- (temperature - tze->trip_stats[trip_id].avg) /
- tze->trip_stats[trip_id].count;
unlock:
mutex_unlock(&thermal_dbg->lock);
@@ -671,6 +692,9 @@ void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
tze->trip_stats[trip_id].duration =
ktime_add(delta, tze->trip_stats[trip_id].duration);
+ /* Mark the end of mitigation for this trip point. */
+ tze->trip_stats[trip_id].timestamp = KTIME_MAX;
+
/*
* This event closes the mitigation as we are crossing the
* last trip point the way down.
@@ -682,12 +706,12 @@ out:
mutex_unlock(&thermal_dbg->lock);
}
-void thermal_debug_update_temp(struct thermal_zone_device *tz)
+void thermal_debug_update_trip_stats(struct thermal_zone_device *tz)
{
struct thermal_debugfs *thermal_dbg = tz->debugfs;
- struct tz_episode *tze;
struct tz_debugfs *tz_dbg;
- int trip_id, i;
+ struct tz_episode *tze;
+ int i;
if (!thermal_dbg)
return;
@@ -699,15 +723,16 @@ void thermal_debug_update_temp(struct thermal_zone_device *tz)
if (!tz_dbg->nr_trips)
goto out;
+ tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+
for (i = 0; i < tz_dbg->nr_trips; i++) {
- trip_id = tz_dbg->trips_crossed[i];
- tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
- tze->trip_stats[trip_id].count++;
- tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, tz->temperature);
- tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, tz->temperature);
- tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
- (tz->temperature - tze->trip_stats[trip_id].avg) /
- tze->trip_stats[trip_id].count;
+ int trip_id = tz_dbg->trips_crossed[i];
+ struct trip_stats *trip_stats = &tze->trip_stats[trip_id];
+
+ trip_stats->max = max(trip_stats->max, tz->temperature);
+ trip_stats->min = min(trip_stats->min, tz->temperature);
+ trip_stats->avg += (tz->temperature - trip_stats->avg) /
+ ++trip_stats->count;
}
out:
mutex_unlock(&thermal_dbg->lock);
@@ -715,8 +740,7 @@ out:
static void *tze_seq_start(struct seq_file *s, loff_t *pos)
{
- struct thermal_zone_device *tz = s->private;
- struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct thermal_debugfs *thermal_dbg = s->private;
struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
mutex_lock(&thermal_dbg->lock);
@@ -726,8 +750,7 @@ static void *tze_seq_start(struct seq_file *s, loff_t *pos)
static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct thermal_zone_device *tz = s->private;
- struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct thermal_debugfs *thermal_dbg = s->private;
struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
return seq_list_next(v, &tz_dbg->tz_episodes, pos);
@@ -735,29 +758,46 @@ static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void tze_seq_stop(struct seq_file *s, void *v)
{
- struct thermal_zone_device *tz = s->private;
- struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct thermal_debugfs *thermal_dbg = s->private;
mutex_unlock(&thermal_dbg->lock);
}
static int tze_seq_show(struct seq_file *s, void *v)
{
- struct thermal_zone_device *tz = s->private;
- struct thermal_trip *trip;
+ struct thermal_debugfs *thermal_dbg = s->private;
+ struct thermal_zone_device *tz = thermal_dbg->tz_dbg.tz;
+ struct thermal_trip_desc *td;
struct tz_episode *tze;
const char *type;
+ u64 duration_ms;
int trip_id;
+ char c;
tze = list_entry((struct list_head *)v, struct tz_episode, node);
- seq_printf(s, ",-Mitigation at %lluus, duration=%llums\n",
- ktime_to_us(tze->timestamp),
- ktime_to_ms(tze->duration));
+ if (tze->duration == KTIME_MIN) {
+ /* Mitigation in progress. */
+ duration_ms = ktime_to_ms(ktime_sub(ktime_get(), tze->timestamp));
+ c = '>';
+ } else {
+ duration_ms = ktime_to_ms(tze->duration);
+ c = '=';
+ }
+
+ seq_printf(s, ",-Mitigation at %lluus, duration%c%llums\n",
+ ktime_to_us(tze->timestamp), c, duration_ms);
+
+ seq_printf(s, "| trip | type | temp(°mC) | hyst(°mC) | duration | avg(°mC) | min(°mC) | max(°mC) |\n");
- seq_printf(s, "| trip | type | temp(°mC) | hyst(°mC) | duration | avg(°mC) | min(°mC) | max(°mC) |\n");
+ for_each_trip_desc(tz, td) {
+ const struct thermal_trip *trip = &td->trip;
+ struct trip_stats *trip_stats;
+
+ /* Skip invalid trips. */
+ if (trip->temperature == THERMAL_TEMP_INVALID)
+ continue;
- for_each_trip(tz, trip) {
/*
* There is no possible mitigation happening at the
* critical trip point, so the stats will be always
@@ -766,6 +806,13 @@ static int tze_seq_show(struct seq_file *s, void *v)
if (trip->type == THERMAL_TRIP_CRITICAL)
continue;
+ trip_id = thermal_zone_trip_id(tz, trip);
+ trip_stats = &tze->trip_stats[trip_id];
+
+ /* Skip trips without any stats. */
+ if (trip_stats->min > trip_stats->max)
+ continue;
+
if (trip->type == THERMAL_TRIP_PASSIVE)
type = "passive";
else if (trip->type == THERMAL_TRIP_ACTIVE)
@@ -773,17 +820,28 @@ static int tze_seq_show(struct seq_file *s, void *v)
else
type = "hot";
- trip_id = thermal_zone_trip_id(tz, trip);
+ if (trip_stats->timestamp != KTIME_MAX) {
+ /* Mitigation in progress. */
+ ktime_t delta = ktime_sub(ktime_get(),
+ trip_stats->timestamp);
+
+ delta = ktime_add(delta, trip_stats->duration);
+ duration_ms = ktime_to_ms(delta);
+ c = '>';
+ } else {
+ duration_ms = ktime_to_ms(trip_stats->duration);
+ c = ' ';
+ }
- seq_printf(s, "| %*d | %*s | %*d | %*d | %*lld | %*d | %*d | %*d |\n",
+ seq_printf(s, "| %*d | %*s | %*d | %*d | %c%*lld | %*d | %*d | %*d |\n",
4 , trip_id,
8, type,
9, trip->temperature,
9, trip->hysteresis,
- 10, ktime_to_ms(tze->trip_stats[trip_id].duration),
- 9, tze->trip_stats[trip_id].avg,
- 9, tze->trip_stats[trip_id].min,
- 9, tze->trip_stats[trip_id].max);
+ c, 10, duration_ms,
+ 9, trip_stats->avg,
+ 9, trip_stats->min,
+ 9, trip_stats->max);
}
return 0;
@@ -809,6 +867,8 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz)
tz_dbg = &thermal_dbg->tz_dbg;
+ tz_dbg->tz = tz;
+
tz_dbg->trips_crossed = kzalloc(sizeof(int) * tz->num_trips, GFP_KERNEL);
if (!tz_dbg->trips_crossed) {
thermal_debugfs_remove_id(thermal_dbg);
@@ -817,23 +877,44 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz)
INIT_LIST_HEAD(&tz_dbg->tz_episodes);
- debugfs_create_file("mitigations", 0400, thermal_dbg->d_top, tz, &tze_fops);
+ debugfs_create_file("mitigations", 0400, thermal_dbg->d_top,
+ thermal_dbg, &tze_fops);
tz->debugfs = thermal_dbg;
}
void thermal_debug_tz_remove(struct thermal_zone_device *tz)
{
- struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct thermal_debugfs *thermal_dbg;
+ struct tz_episode *tze, *tmp;
+ struct tz_debugfs *tz_dbg;
+ int *trips_crossed;
- if (!thermal_dbg)
+ mutex_lock(&tz->lock);
+
+ thermal_dbg = tz->debugfs;
+ if (!thermal_dbg) {
+ mutex_unlock(&tz->lock);
return;
+ }
+
+ tz->debugfs = NULL;
+
+ mutex_unlock(&tz->lock);
+
+ tz_dbg = &thermal_dbg->tz_dbg;
mutex_lock(&thermal_dbg->lock);
- tz->debugfs = NULL;
+ trips_crossed = tz_dbg->trips_crossed;
+
+ list_for_each_entry_safe(tze, tmp, &tz_dbg->tz_episodes, node) {
+ list_del(&tze->node);
+ kfree(tze);
+ }
mutex_unlock(&thermal_dbg->lock);
thermal_debugfs_remove_id(thermal_dbg);
+ kfree(trips_crossed);
}
diff --git a/drivers/thermal/thermal_debugfs.h b/drivers/thermal/thermal_debugfs.h
index 155b9af5fe87..74ee65ee82ff 100644
--- a/drivers/thermal/thermal_debugfs.h
+++ b/drivers/thermal/thermal_debugfs.h
@@ -2,7 +2,7 @@
#ifdef CONFIG_THERMAL_DEBUGFS
void thermal_debug_init(void);
-void thermal_debug_cdev_add(struct thermal_cooling_device *cdev);
+void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state);
void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev);
void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, int state);
void thermal_debug_tz_add(struct thermal_zone_device *tz);
@@ -11,10 +11,10 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
const struct thermal_trip *trip);
void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
const struct thermal_trip *trip);
-void thermal_debug_update_temp(struct thermal_zone_device *tz);
+void thermal_debug_update_trip_stats(struct thermal_zone_device *tz);
#else
static inline void thermal_debug_init(void) {}
-static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev) {}
+static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state) {}
static inline void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev) {}
static inline void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
int state) {}
@@ -24,5 +24,5 @@ static inline void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
const struct thermal_trip *trip) {};
static inline void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
const struct thermal_trip *trip) {}
-static inline void thermal_debug_update_temp(struct thermal_zone_device *tz) {}
+static inline void thermal_debug_update_trip_stats(struct thermal_zone_device *tz) {}
#endif /* CONFIG_THERMAL_DEBUGFS */
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index c5a057b59c42..d9f4e26ec125 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -50,7 +50,7 @@ get_thermal_instance(struct thermal_zone_device *tz,
mutex_lock(&tz->lock);
mutex_lock(&cdev->lock);
- trip = &tz->trips[trip_index];
+ trip = &tz->trips[trip_index].trip;
list_for_each_entry(pos, &tz->thermal_instances, tz_node) {
if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
@@ -82,7 +82,7 @@ EXPORT_SYMBOL(get_thermal_instance);
*/
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
{
- const struct thermal_trip *trip;
+ const struct thermal_trip_desc *td;
int crit_temp = INT_MAX;
int ret = -EINVAL;
@@ -91,7 +91,9 @@ int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
ret = tz->ops.get_temp(tz, temp);
if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
- for_each_trip(tz, trip) {
+ for_each_trip_desc(tz, td) {
+ const struct thermal_trip *trip = &td->trip;
+
if (trip->type == THERMAL_TRIP_CRITICAL) {
crit_temp = trip->temperature;
break;
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index 76a231a29654..97157c453630 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -7,17 +7,13 @@
* Generic netlink for thermal management framework
*/
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <uapi/linux/thermal.h>
#include "thermal_core.h"
-enum thermal_genl_multicast_groups {
- THERMAL_GENL_SAMPLING_GROUP = 0,
- THERMAL_GENL_EVENT_GROUP = 1,
-};
-
static const struct genl_multicast_group thermal_genl_mcgrps[] = {
[THERMAL_GENL_SAMPLING_GROUP] = { .name = THERMAL_GENL_SAMPLING_GROUP_NAME, },
[THERMAL_GENL_EVENT_GROUP] = { .name = THERMAL_GENL_EVENT_GROUP_NAME, },
@@ -74,11 +70,12 @@ struct param {
typedef int (*cb_t)(struct param *);
-static struct genl_family thermal_gnl_family;
+static struct genl_family thermal_genl_family;
+static BLOCKING_NOTIFIER_HEAD(thermal_genl_chain);
static int thermal_group_has_listeners(enum thermal_genl_multicast_groups group)
{
- return genl_has_listeners(&thermal_gnl_family, &init_net, group);
+ return genl_has_listeners(&thermal_genl_family, &init_net, group);
}
/************************** Sampling encoding *******************************/
@@ -95,7 +92,7 @@ int thermal_genl_sampling_temp(int id, int temp)
if (!skb)
return -ENOMEM;
- hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0,
+ hdr = genlmsg_put(skb, 0, 0, &thermal_genl_family, 0,
THERMAL_GENL_SAMPLING_TEMP);
if (!hdr)
goto out_free;
@@ -108,7 +105,7 @@ int thermal_genl_sampling_temp(int id, int temp)
genlmsg_end(skb, hdr);
- genlmsg_multicast(&thermal_gnl_family, skb, 0, THERMAL_GENL_SAMPLING_GROUP, GFP_KERNEL);
+ genlmsg_multicast(&thermal_genl_family, skb, 0, THERMAL_GENL_SAMPLING_GROUP, GFP_KERNEL);
return 0;
out_cancel:
@@ -282,7 +279,7 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
return -ENOMEM;
p->msg = msg;
- hdr = genlmsg_put(msg, 0, 0, &thermal_gnl_family, 0, event);
+ hdr = genlmsg_put(msg, 0, 0, &thermal_genl_family, 0, event);
if (!hdr)
goto out_free_msg;
@@ -292,7 +289,7 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
genlmsg_end(msg, hdr);
- genlmsg_multicast(&thermal_gnl_family, msg, 0, THERMAL_GENL_EVENT_GROUP, GFP_KERNEL);
+ genlmsg_multicast(&thermal_genl_family, msg, 0, THERMAL_GENL_EVENT_GROUP, GFP_KERNEL);
return 0;
@@ -445,7 +442,7 @@ out_cancel_nest:
static int thermal_genl_cmd_tz_get_trip(struct param *p)
{
struct sk_buff *msg = p->msg;
- const struct thermal_trip *trip;
+ const struct thermal_trip_desc *td;
struct thermal_zone_device *tz;
struct nlattr *start_trip;
int id;
@@ -465,7 +462,9 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
mutex_lock(&tz->lock);
- for_each_trip(tz, trip) {
+ for_each_trip_desc(tz, td) {
+ const struct thermal_trip *trip = &td->trip;
+
if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID,
thermal_zone_trip_id(tz, trip)) ||
nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip->type) ||
@@ -593,7 +592,7 @@ static int thermal_genl_cmd_dumpit(struct sk_buff *skb,
int ret;
void *hdr;
- hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0, cmd);
+ hdr = genlmsg_put(skb, 0, 0, &thermal_genl_family, 0, cmd);
if (!hdr)
return -EMSGSIZE;
@@ -625,7 +624,7 @@ static int thermal_genl_cmd_doit(struct sk_buff *skb,
return -ENOMEM;
p.msg = msg;
- hdr = genlmsg_put_reply(msg, info, &thermal_gnl_family, 0, cmd);
+ hdr = genlmsg_put_reply(msg, info, &thermal_genl_family, 0, cmd);
if (!hdr)
goto out_free_msg;
@@ -645,6 +644,27 @@ out_free_msg:
return ret;
}
+static int thermal_genl_bind(int mcgrp)
+{
+ struct thermal_genl_notify n = { .mcgrp = mcgrp };
+
+ if (WARN_ON_ONCE(mcgrp > THERMAL_GENL_MAX_GROUP))
+ return -EINVAL;
+
+ blocking_notifier_call_chain(&thermal_genl_chain, THERMAL_NOTIFY_BIND, &n);
+ return 0;
+}
+
+static void thermal_genl_unbind(int mcgrp)
+{
+ struct thermal_genl_notify n = { .mcgrp = mcgrp };
+
+ if (WARN_ON_ONCE(mcgrp > THERMAL_GENL_MAX_GROUP))
+ return;
+
+ blocking_notifier_call_chain(&thermal_genl_chain, THERMAL_NOTIFY_UNBIND, &n);
+}
+
static const struct genl_small_ops thermal_genl_ops[] = {
{
.cmd = THERMAL_GENL_CMD_TZ_GET_ID,
@@ -673,12 +693,14 @@ static const struct genl_small_ops thermal_genl_ops[] = {
},
};
-static struct genl_family thermal_gnl_family __ro_after_init = {
+static struct genl_family thermal_genl_family __ro_after_init = {
.hdrsize = 0,
.name = THERMAL_GENL_FAMILY_NAME,
.version = THERMAL_GENL_VERSION,
.maxattr = THERMAL_GENL_ATTR_MAX,
.policy = thermal_genl_policy,
+ .bind = thermal_genl_bind,
+ .unbind = thermal_genl_unbind,
.small_ops = thermal_genl_ops,
.n_small_ops = ARRAY_SIZE(thermal_genl_ops),
.resv_start_op = THERMAL_GENL_CMD_CDEV_GET + 1,
@@ -686,12 +708,22 @@ static struct genl_family thermal_gnl_family __ro_after_init = {
.n_mcgrps = ARRAY_SIZE(thermal_genl_mcgrps),
};
+int thermal_genl_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&thermal_genl_chain, nb);
+}
+
+int thermal_genl_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&thermal_genl_chain, nb);
+}
+
int __init thermal_netlink_init(void)
{
- return genl_register_family(&thermal_gnl_family);
+ return genl_register_family(&thermal_genl_family);
}
void __init thermal_netlink_exit(void)
{
- genl_unregister_family(&thermal_gnl_family);
+ genl_unregister_family(&thermal_genl_family);
}
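
Editor's note: the new notifier chain lets in-kernel producers learn when userspace joins or leaves one of the thermal multicast groups, so they can avoid generating events nobody consumes. A minimal sketch of a hypothetical consumer, assuming an in-tree driver that includes thermal_netlink.h; the names my_events_enabled, my_thermal_genl_event and my_init are illustrative, not part of the patch:

	#include <linux/notifier.h>
	#include "thermal_netlink.h"

	static bool my_events_enabled;

	static int my_thermal_genl_event(struct notifier_block *nb,
					 unsigned long action, void *data)
	{
		struct thermal_genl_notify *notify = data;

		/* only react to listeners of the event multicast group */
		if (notify->mcgrp != THERMAL_GENL_EVENT_GROUP)
			return NOTIFY_DONE;

		if (action == THERMAL_NOTIFY_BIND)
			my_events_enabled = true;
		else if (action == THERMAL_NOTIFY_UNBIND)
			my_events_enabled = false;

		return NOTIFY_OK;
	}

	static struct notifier_block my_thermal_genl_nb = {
		.notifier_call = my_thermal_genl_event,
	};

	/* registration, e.g. from a driver's init path */
	static int __init my_init(void)
	{
		return thermal_genl_register_notifier(&my_thermal_genl_nb);
	}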
diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h
index 93a927e144d5..e01221e8816b 100644
--- a/drivers/thermal/thermal_netlink.h
+++ b/drivers/thermal/thermal_netlink.h
@@ -10,6 +10,19 @@ struct thermal_genl_cpu_caps {
int efficiency;
};
+enum thermal_genl_multicast_groups {
+ THERMAL_GENL_SAMPLING_GROUP = 0,
+ THERMAL_GENL_EVENT_GROUP = 1,
+ THERMAL_GENL_MAX_GROUP = THERMAL_GENL_EVENT_GROUP,
+};
+
+#define THERMAL_NOTIFY_BIND 0
+#define THERMAL_NOTIFY_UNBIND 1
+
+struct thermal_genl_notify {
+ int mcgrp;
+};
+
struct thermal_zone_device;
struct thermal_trip;
struct thermal_cooling_device;
@@ -18,6 +31,9 @@ struct thermal_cooling_device;
#ifdef CONFIG_THERMAL_NETLINK
int __init thermal_netlink_init(void);
void __init thermal_netlink_exit(void);
+int thermal_genl_register_notifier(struct notifier_block *nb);
+int thermal_genl_unregister_notifier(struct notifier_block *nb);
+
int thermal_notify_tz_create(const struct thermal_zone_device *tz);
int thermal_notify_tz_delete(const struct thermal_zone_device *tz);
int thermal_notify_tz_enable(const struct thermal_zone_device *tz);
@@ -48,6 +64,16 @@ static inline int thermal_notify_tz_create(const struct thermal_zone_device *tz)
return 0;
}
+static inline int thermal_genl_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int thermal_genl_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
static inline int thermal_notify_tz_delete(const struct thermal_zone_device *tz)
{
return 0;
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 5b533fa40437..88211ccdfbd6 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -88,7 +88,7 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr,
if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1)
return -EINVAL;
- switch (tz->trips[trip_id].type) {
+ switch (tz->trips[trip_id].trip.type) {
case THERMAL_TRIP_CRITICAL:
return sprintf(buf, "critical\n");
case THERMAL_TRIP_HOT:
@@ -120,7 +120,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&tz->lock);
- trip = &tz->trips[trip_id];
+ trip = &tz->trips[trip_id].trip;
if (temp != trip->temperature) {
if (tz->ops.set_trip_temp) {
@@ -150,7 +150,7 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
return -EINVAL;
- return sprintf(buf, "%d\n", tz->trips[trip_id].temperature);
+ return sprintf(buf, "%d\n", tz->trips[trip_id].trip.temperature);
}
static ssize_t
@@ -171,7 +171,7 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&tz->lock);
- trip = &tz->trips[trip_id];
+ trip = &tz->trips[trip_id].trip;
if (hyst != trip->hysteresis) {
trip->hysteresis = hyst;
@@ -194,7 +194,7 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
- return sprintf(buf, "%d\n", tz->trips[trip_id].hysteresis);
+ return sprintf(buf, "%d\n", tz->trips[trip_id].trip.hysteresis);
}
static ssize_t
@@ -393,7 +393,7 @@ static const struct attribute_group *thermal_zone_attribute_groups[] = {
*/
static int create_trip_attrs(struct thermal_zone_device *tz)
{
- const struct thermal_trip *trip;
+ const struct thermal_trip_desc *td;
struct attribute **attrs;
/* This function works only for zones with at least one trip */
@@ -429,8 +429,8 @@ static int create_trip_attrs(struct thermal_zone_device *tz)
return -ENOMEM;
}
- for_each_trip(tz, trip) {
- int indx = thermal_zone_trip_id(tz, trip);
+ for_each_trip_desc(tz, td) {
+ int indx = thermal_zone_trip_id(tz, &td->trip);
/* create trip type attribute */
snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
@@ -452,7 +452,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz)
tz->trip_temp_attrs[indx].name;
tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
- if (trip->flags & THERMAL_TRIP_FLAG_RW_TEMP) {
+ if (td->trip.flags & THERMAL_TRIP_FLAG_RW_TEMP) {
tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
tz->trip_temp_attrs[indx].attr.store =
trip_point_temp_store;
@@ -467,7 +467,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz)
tz->trip_hyst_attrs[indx].name;
tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
- if (trip->flags & THERMAL_TRIP_FLAG_RW_HYST) {
+ if (td->trip.flags & THERMAL_TRIP_FLAG_RW_HYST) {
tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
tz->trip_hyst_attrs[indx].attr.store =
trip_point_hyst_store;
diff --git a/drivers/thermal/thermal_trace.h b/drivers/thermal/thermal_trace.h
index 459c8ce6cf3b..88a962f560f2 100644
--- a/drivers/thermal/thermal_trace.h
+++ b/drivers/thermal/thermal_trace.h
@@ -9,6 +9,8 @@
#include <linux/thermal.h>
#include <linux/tracepoint.h>
+#include "thermal_core.h"
+
TRACE_DEFINE_ENUM(THERMAL_TRIP_CRITICAL);
TRACE_DEFINE_ENUM(THERMAL_TRIP_HOT);
TRACE_DEFINE_ENUM(THERMAL_TRIP_PASSIVE);
diff --git a/drivers/thermal/thermal_trace_ipa.h b/drivers/thermal/thermal_trace_ipa.h
index b16b5dd863d9..a82821eebc88 100644
--- a/drivers/thermal/thermal_trace_ipa.h
+++ b/drivers/thermal/thermal_trace_ipa.h
@@ -7,6 +7,8 @@
#include <linux/tracepoint.h>
+#include "thermal_core.h"
+
TRACE_EVENT(thermal_power_allocator,
TP_PROTO(struct thermal_zone_device *tz, u32 total_req_power,
u32 total_granted_power, int num_actors, u32 power_range,
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index 497abf0d47ca..21ece8399997 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -13,11 +13,11 @@ int for_each_thermal_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data)
{
- struct thermal_trip *trip;
+ struct thermal_trip_desc *td;
int ret;
- for_each_trip(tz, trip) {
- ret = cb(trip, data);
+ for_each_trip_desc(tz, td) {
+ ret = cb(&td->trip, data);
if (ret)
return ret;
}
@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips);
*/
void __thermal_zone_set_trips(struct thermal_zone_device *tz)
{
- const struct thermal_trip *trip;
+ const struct thermal_trip_desc *td;
int low = -INT_MAX, high = INT_MAX;
int ret;
@@ -72,7 +72,8 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
if (!tz->ops.set_trips)
return;
- for_each_trip(tz, trip) {
+ for_each_trip_desc(tz, td) {
+ const struct thermal_trip *trip = &td->trip;
int trip_low;
trip_low = trip->temperature - trip->hysteresis;
@@ -110,7 +111,7 @@ int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
if (!tz || trip_id < 0 || trip_id >= tz->num_trips || !trip)
return -EINVAL;
- *trip = tz->trips[trip_id];
+ *trip = tz->trips[trip_id].trip;
return 0;
}
EXPORT_SYMBOL_GPL(__thermal_zone_get_trip);
@@ -135,8 +136,9 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz,
* Assume the trip to be located within the bounds of the thermal
* zone's trips[] table.
*/
- return trip - tz->trips;
+ return trip_to_trip_desc(trip) - tz->trips;
}
+
void thermal_zone_trip_updated(struct thermal_zone_device *tz,
const struct thermal_trip *trip)
{
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 6ffc4e81ffed..326433df5880 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -3180,22 +3180,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged)
- return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
+ /*
+ * Unconfigure the downstream port so that wake-on-connect can be
+ * configured after the router is unplugged. There is no need to
+ * unconfigure the upstream port since its router is unplugged.
+ */
up = tb_upstream_port(sw);
- if (tb_switch_is_usb4(up->sw))
- usb4_port_unconfigure(up);
- else
- tb_lc_unconfigure_port(up);
-
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
+
+ if (sw->is_unplugged)
+ return;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ usb4_port_unconfigure(up);
+ else
+ tb_lc_unconfigure_port(up);
}
static void tb_switch_credits_init(struct tb_switch *sw)
@@ -3441,7 +3448,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
return tb_lc_set_wake(sw, flags);
}
-int tb_switch_resume(struct tb_switch *sw)
+static void tb_switch_check_wakes(struct tb_switch *sw)
+{
+ if (device_may_wakeup(&sw->dev)) {
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_check_wakes(sw);
+ }
+}
+
+/**
+ * tb_switch_resume() - Resume a switch after sleep
+ * @sw: Switch to resume
+ * @runtime: Is this resume from runtime suspend or system sleep
+ *
+ * Resumes and re-enumerates the router (and all its children) if it is still
+ * plugged in after suspend. A device router whose UID changed during suspend
+ * is not enumerated. On resume from system sleep, notifies the PM core about
+ * any wakes that occurred during suspend. Disables all wakes except the USB4
+ * wake of the upstream port, which must stay enabled for USB4 routers.
+ */
+int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
struct tb_port *port;
int err;
@@ -3490,6 +3516,9 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
+ if (!runtime)
+ tb_switch_check_wakes(sw);
+
/* Disable wakes */
tb_switch_set_wake(sw, 0);
@@ -3519,7 +3548,8 @@ int tb_switch_resume(struct tb_switch *sw)
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
- if (port->remote && tb_switch_resume(port->remote->sw)) {
+ if (port->remote &&
+ tb_switch_resume(port->remote->sw, runtime)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index c5ce7a694b27..3e44c78ac409 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1801,6 +1801,12 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;
}
+ /* Needs to be on different routers */
+ if (in->sw == port->sw) {
+ tb_port_dbg(port, "skipping DP OUT on same router\n");
+ continue;
+ }
+
tb_port_dbg(port, "DP OUT available\n");
/*
@@ -2936,7 +2942,7 @@ static int tb_resume_noirq(struct tb *tb)
if (!tb_switch_is_usb4(tb->root_switch))
tb_switch_reset(tb->root_switch);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, false);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
@@ -3062,7 +3068,7 @@ static int tb_runtime_resume(struct tb *tb)
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, true);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index feed8ecaf712..18aae4ccaed5 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -827,7 +827,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
-int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw, bool runtime);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec);
@@ -1288,6 +1288,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
return usb4_switch_version(sw) > 0;
}
+void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 9860b49d7a2b..78b06e922fda 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -155,7 +155,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
tx_dwords, rx_data, rx_dwords);
}
-static void usb4_switch_check_wakes(struct tb_switch *sw)
+/**
+ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
+ * @sw: Router whose wakes to check
+ *
+ * Checks for wakes that occurred during suspend and notifies the PM core about them.
+ */
+void usb4_switch_check_wakes(struct tb_switch *sw)
{
bool wakeup_usb4 = false;
struct usb4_port *usb4;
@@ -163,9 +169,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw)
bool wakeup = false;
u32 val;
- if (!device_may_wakeup(&sw->dev))
- return;
-
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
@@ -244,8 +247,6 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
- usb4_switch_check_wakes(sw);
-
if (!tb_route(sw))
return 0;
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 1aa3e55c8b47..6a7b286f6f5a 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -293,9 +293,6 @@ static inline int serial8250_in_MCR(struct uart_8250_port *up)
return mctrl;
}
-bool alpha_jensen(void);
-void alpha_jensen_set_mctrl(struct uart_port *port, unsigned int mctrl);
-
#ifdef CONFIG_SERIAL_8250_PNP
int serial8250_pnp_init(void);
void serial8250_pnp_exit(void);
diff --git a/drivers/tty/serial/8250/8250_alpha.c b/drivers/tty/serial/8250/8250_alpha.c
deleted file mode 100644
index 58e70328aa4d..000000000000
--- a/drivers/tty/serial/8250/8250_alpha.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-
-#include <asm/machvec.h>
-#include "8250.h"
-
-bool alpha_jensen(void)
-{
- return !strcmp(alpha_mv.vector_name, "Jensen");
-}
-
-void alpha_jensen_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- /*
- * Digital did something really horribly wrong with the OUT1 and OUT2
- * lines on Alpha Jensen. The failure mode is that if either is
- * cleared, the machine locks up with endless interrupts.
- */
- mctrl |= TIOCM_OUT1 | TIOCM_OUT2;
-
- serial8250_do_set_mctrl(port, mctrl);
-}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index b62ad9006780..2504e0455875 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -508,10 +508,6 @@ static struct uart_8250_port *serial8250_setup_port(int index)
up->ops = &univ8250_driver_ops;
- if (IS_ENABLED(CONFIG_ALPHA_JENSEN) ||
- (IS_ENABLED(CONFIG_ALPHA_GENERIC) && alpha_jensen()))
- up->port.set_mctrl = alpha_jensen_set_mctrl;
-
serial8250_set_defaults(up);
return up;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a3acbf0f5da1..1300c92b8702 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -356,9 +356,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
+ clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, newrate);
- if (rate > 0 && p->uartclk != rate) {
- clk_disable_unprepare(d->clk);
+ if (rate > 0) {
/*
* Note that any clock-notifier worker will block in
* serial8250_update_uartclk() until we are done.
@@ -366,8 +366,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, newrate);
if (!ret)
p->uartclk = rate;
- clk_prepare_enable(d->clk);
}
+ clk_prepare_enable(d->clk);
dw8250_do_set_termios(p, termios, old);
}
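
Editor's note: the reshuffled hunk above is easier to read as the resulting sequence: the clock is now stopped before both the rate query and the rate change, and re-enabled unconditionally afterwards. A sketch of the net result (d, p and newrate as in the function; error handling elided):

	long rate;

	clk_disable_unprepare(d->clk);            /* stop the clock first */
	rate = clk_round_rate(d->clk, newrate);   /* query the achievable rate */
	if (rate > 0) {
		if (!clk_set_rate(d->clk, newrate))  /* returns 0 on success */
			p->uartclk = rate;
	}
	clk_prepare_enable(d->clk);               /* always re-enable */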
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 7984ee05af1d..47e1a056a60c 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -151,7 +151,7 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
ret = uart_read_port_properties(&uart.port);
if (ret)
- return ret;
+ goto dis_uart_clk;
uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 0d35c77fad9e..e2e4f99f9d34 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5010,12 +5010,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATRO_B,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_A,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_bt_2_115200 },
- { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUATTRO_B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_b0_bt_2_115200 },
{ PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_4_460800 },
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index ea2e81f58eac..69ac00270547 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -5,8 +5,6 @@
obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o
8250-y := 8250_core.o
-8250-$(CONFIG_ALPHA_GENERIC) += 8250_alpha.o
-8250-$(CONFIG_ALPHA_JENSEN) += 8250_alpha.o
8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
8250_base-y := 8250_port.o
8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4749331fe618..1e8853eae504 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1086,11 +1086,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istat;
+ u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
- u32 stat = mxs_read(s, REG_STAT);
+ uart_port_lock(&s->port);
+
+ stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
@@ -1126,6 +1128,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
+ uart_port_unlock(&s->port);
+
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 05d97e89511e..92195f984de1 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_port *port;
unsigned char ch, r1, drop, flag;
- int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL) {
@@ -291,25 +290,12 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
if (r1 & Rx_OVR)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
- /* We can get stuck in an infinite loop getting char 0 when the
- * line is in a wrong HW state, we break that here.
- * When that happens, I disable the receive side of the driver.
- * Note that what I've been experiencing is a real irq loop where
- * I'm getting flooded regardless of the actual port speed.
- * Something strange is going on with the HW
- */
- if ((++loops) > 1000)
- goto flood;
ch = read_zsreg(uap, R0);
if (!(ch & Rx_CH_AV))
break;
}
return true;
- flood:
- pmz_interrupt_control(uap, 0);
- pmz_error("pmz: rx irq flood !\n");
- return true;
}
static void pmz_status_handle(struct uart_pmac_port *uap)
diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
index c74c548f0db6..b6c38d2edfd4 100644
--- a/drivers/tty/serial/serial_base.h
+++ b/drivers/tty/serial/serial_base.h
@@ -22,6 +22,7 @@ struct serial_ctrl_device {
struct serial_port_device {
struct device dev;
struct uart_port *port;
+ unsigned int tx_enabled:1;
};
int serial_base_ctrl_init(void);
@@ -30,6 +31,9 @@ void serial_base_ctrl_exit(void);
int serial_base_port_init(void);
void serial_base_port_exit(void);
+void serial_base_port_startup(struct uart_port *port);
+void serial_base_port_shutdown(struct uart_port *port);
+
int serial_base_driver_register(struct device_driver *driver);
void serial_base_driver_unregister(struct device_driver *driver);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index ff85ebd3a007..c476d884356d 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
* enabled, serial_port_runtime_resume() calls start_tx() again
* after enabling the device.
*/
- if (pm_runtime_active(&port_dev->dev))
+ if (!pm_runtime_enabled(port->dev) || pm_runtime_active(&port_dev->dev))
port->ops->start_tx(port);
pm_runtime_mark_last_busy(&port_dev->dev);
pm_runtime_put_autosuspend(&port_dev->dev);
@@ -323,16 +323,26 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
bool init_hw)
{
struct tty_port *port = &state->port;
+ struct uart_port *uport;
int retval;
if (tty_port_initialized(port))
- return 0;
+ goto out_base_port_startup;
retval = uart_port_startup(tty, state, init_hw);
- if (retval)
+ if (retval) {
set_bit(TTY_IO_ERROR, &tty->flags);
+ return retval;
+ }
- return retval;
+out_base_port_startup:
+ uport = uart_port_check(state);
+ if (!uport)
+ return -EIO;
+
+ serial_base_port_startup(uport);
+
+ return 0;
}
/*
@@ -355,6 +365,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
+ if (uport)
+ serial_base_port_shutdown(uport);
+
if (tty_port_initialized(port)) {
tty_port_set_initialized(port, false);
@@ -1775,6 +1788,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
uport->ops->stop_rx(uport);
uart_port_unlock_irq(uport);
+ serial_base_port_shutdown(uport);
uart_port_shutdown(port);
/*
@@ -1788,6 +1802,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
* Free the transmit buffer.
*/
uart_port_lock_irq(uport);
+ uart_circ_clear(&state->xmit);
buf = state->xmit.buf;
state->xmit.buf = NULL;
uart_port_unlock_irq(uport);
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 22b9eeb23e68..7e3a1c7b097c 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -39,8 +39,12 @@ static int serial_port_runtime_resume(struct device *dev)
/* Flush any pending TX for the port */
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled)
+ goto unlock;
if (__serial_port_busy(port))
port->ops->start_tx(port);
+
+unlock:
uart_port_unlock_irqrestore(port, flags);
out:
@@ -60,6 +64,11 @@ static int serial_port_runtime_suspend(struct device *dev)
return 0;
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled) {
+ uart_port_unlock_irqrestore(port, flags);
+ return 0;
+ }
+
busy = __serial_port_busy(port);
if (busy)
port->ops->start_tx(port);
@@ -71,6 +80,31 @@ static int serial_port_runtime_suspend(struct device *dev)
return busy ? -EBUSY : 0;
}
+static void serial_base_port_set_tx(struct uart_port *port,
+ struct serial_port_device *port_dev,
+ bool enabled)
+{
+ unsigned long flags;
+
+ uart_port_lock_irqsave(port, &flags);
+ port_dev->tx_enabled = enabled;
+ uart_port_unlock_irqrestore(port, flags);
+}
+
+void serial_base_port_startup(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, true);
+}
+
+void serial_base_port_shutdown(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, false);
+}
+
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
serial_port_runtime_suspend,
serial_port_runtime_resume, NULL);
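
Editor's note: taken together with the serial_core.c changes above, the new flag gives runtime PM a well-defined window in which it may touch the transmitter. A sketch of the lifecycle (uport as in the callers; comments describe the helpers added above):

	serial_base_port_startup(uport);   /* sets port_dev->tx_enabled under the port lock */
	/* ... port open: runtime suspend/resume may call ->start_tx() ... */
	serial_base_port_shutdown(uport);  /* clears tx_enabled; the PM callbacks now bail out early */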
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 58d169e5c1db..4fa5a03ebac0 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -861,6 +861,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
unsigned int size;
+ irqreturn_t ret = IRQ_NONE;
sr = readl_relaxed(port->membase + ofs->isr);
@@ -869,11 +870,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
(sr & USART_SR_TC)) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_disable(port);
+ ret = IRQ_HANDLED;
}
- if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
+ ret = IRQ_HANDLED;
+ }
if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
/* Clear wake up flag and disable wake up interrupt */
@@ -882,6 +886,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
+ ret = IRQ_HANDLED;
}
/*
@@ -896,6 +901,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
}
@@ -903,6 +909,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_port_lock(port);
stm32_usart_transmit_chars(port);
uart_port_unlock(port);
+ ret = IRQ_HANDLED;
}
/* Receiver timeout irq for DMA RX */
@@ -912,9 +919,10 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return ret;
}
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1084,6 +1092,7 @@ static int stm32_usart_startup(struct uart_port *port)
val |= USART_CR2_SWAP;
writel_relaxed(val, port->membase + ofs->cr2);
}
+ stm32_port->throttled = false;
/* RX FIFO Flush */
if (ofs->rqr != UNDEF_REG)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 06859e17b67b..7a00004bfd03 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -47,7 +47,7 @@ enum {
TSTBUS_MAX,
};
-#define QCOM_UFS_MAX_GEAR 4
+#define QCOM_UFS_MAX_GEAR 5
#define QCOM_UFS_MAX_LANE 2
enum {
@@ -67,26 +67,32 @@ static const struct __ufs_qcom_bw_table {
[MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 },
[MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_MAX][0][0] = { 7643136, 307200 },
};
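
Editor's note: the added G5 rows follow the table's existing pattern: each PWM gear doubles the average bandwidth of the previous one, and the two-lane figure doubles the single-lane one. A standalone sketch reproducing the PWM column, taking the single-lane PWM-G1 average as 922 (half of the two-lane G1 figure shown above); illustrative only, not driver code:

	#include <stdio.h>

	/* PWM-G1 on one lane averages 922; each gear doubles it and a
	 * second lane doubles it again (same unit as the table). */
	static long pwm_avg_bw(int gear, int lanes)
	{
		return 922L * lanes << (gear - 1);
	}

	int main(void)
	{
		for (int gear = 1; gear <= 5; gear++)
			printf("PWM-G%d: lane1=%ld lane2=%ld\n",
			       gear, pwm_avg_bw(gear, 1), pwm_avg_bw(gear, 2));
		return 0;
	}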
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 20d9762331bd..6be3462b109f 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -181,12 +181,14 @@ hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
{
if (pdata->send_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
- vfree(pdata->send_buf);
+ if (!pdata->send_gpadl.decrypted)
+ vfree(pdata->send_buf);
}
if (pdata->recv_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
- vfree(pdata->recv_buf);
+ if (!pdata->recv_gpadl.decrypted)
+ vfree(pdata->recv_buf);
}
}
@@ -295,7 +297,8 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
RECV_BUFFER_SIZE, &pdata->recv_gpadl);
if (ret) {
- vfree(pdata->recv_buf);
+ if (!pdata->recv_gpadl.decrypted)
+ vfree(pdata->recv_buf);
goto fail_close;
}
@@ -317,7 +320,8 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_establish_gpadl(channel, pdata->send_buf,
SEND_BUFFER_SIZE, &pdata->send_gpadl);
if (ret) {
- vfree(pdata->send_buf);
+ if (!pdata->send_gpadl.decrypted)
+ vfree(pdata->send_buf);
goto fail_close;
}
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c8262e2f2917..c553decb5461 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -485,7 +485,6 @@ out_free_mem:
static int service_outstanding_interrupt(struct wdm_device *desc)
{
int rv = 0;
- int used;
/* submit read urb only if the device is waiting for it */
if (!desc->resp_count || !--desc->resp_count)
@@ -500,10 +499,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
goto out;
}
- used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
- if (used)
- goto out;
-
+ set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 9446660e231b..008053039875 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5110,9 +5110,10 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
}
if (usb_endpoint_maxp(&udev->ep0.desc) == i) {
; /* Initial ep0 maxpacket guess is right */
- } else if ((udev->speed == USB_SPEED_FULL ||
+ } else if (((udev->speed == USB_SPEED_FULL ||
udev->speed == USB_SPEED_HIGH) &&
- (i == 8 || i == 16 || i == 32 || i == 64)) {
+ (i == 8 || i == 16 || i == 32 || i == 64)) ||
+ (udev->speed >= USB_SPEED_SUPER && i > 0)) {
/* Initial guess is wrong; use the descriptor's value */
if (udev->speed == USB_SPEED_FULL)
dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 686c01af03e6..e7da2fca11a4 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -51,13 +51,15 @@ static ssize_t disable_show(struct device *dev,
struct usb_port *port_dev = to_usb_port(dev);
struct usb_device *hdev = to_usb_device(dev->parent->parent);
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
- struct usb_interface *intf = to_usb_interface(hub->intfdev);
+ struct usb_interface *intf = to_usb_interface(dev->parent);
int port1 = port_dev->portnum;
u16 portstatus, unused;
bool disabled;
int rc;
struct kernfs_node *kn;
+ if (!hub)
+ return -ENODEV;
hub_get(hub);
rc = usb_autopm_get_interface(intf);
if (rc < 0)
@@ -101,12 +103,14 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
struct usb_port *port_dev = to_usb_port(dev);
struct usb_device *hdev = to_usb_device(dev->parent->parent);
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
- struct usb_interface *intf = to_usb_interface(hub->intfdev);
+ struct usb_interface *intf = to_usb_interface(dev->parent);
int port1 = port_dev->portnum;
bool disabled;
int rc;
struct kernfs_node *kn;
+ if (!hub)
+ return -ENODEV;
rc = kstrtobool(buf, &disabled);
if (rc)
return rc;
@@ -449,8 +453,10 @@ static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
- if (port_dev->child)
+ if (port_dev->child) {
usb_disable_usb2_hardware_lpm(port_dev->child);
+ usb_unlocked_disable_lpm(port_dev->child);
+ }
}
static const struct dev_pm_ops usb_port_pm_ops = {
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 79582b102c7e..994a78ad084b 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -867,13 +867,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 frame_desc_idx;
- struct urb *usb_urb = qtd->urb->priv;
+ struct urb *usb_urb;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
+ usb_urb = qtd->urb->priv;
+
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 31684cdaaae3..100041320e8d 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -104,6 +104,27 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
return 0;
}
+void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ if (enable && !dwc->dis_u3_susphy_quirk)
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ else
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (enable && !dwc->dis_u2_susphy_quirk)
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ else
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+}
+
void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
{
u32 reg;
@@ -585,11 +606,8 @@ static int dwc3_core_ulpi_init(struct dwc3 *dwc)
*/
static int dwc3_phy_setup(struct dwc3 *dwc)
{
- unsigned int hw_mode;
u32 reg;
- hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
-
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
/*
@@ -599,21 +617,16 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
/*
- * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
- * to '0' during coreConsultant configuration. So default value
- * will be '0' when the core is reset. Application needs to set it
- * to '1' after the core initialization is completed.
- */
- if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
- reg |= DWC3_GUSB3PIPECTL_SUSPHY;
-
- /*
- * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
- * power-on reset, and it can be set after core initialization, which is
- * after device soft-reset during initialization.
+ * Above DWC_usb3.0 1.94a, it is recommended to set
+ * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
+ * So the default value will be '0' when the core is reset, and the
+ * application needs to set it to '1' after core initialization completes.
+ *
+ * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
+ * cleared after power-on reset, and it can be set after core
+ * initialization.
*/
- if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
- reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
if (dwc->u2ss_inp3_quirk)
reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
@@ -639,9 +652,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->tx_de_emphasis_quirk)
reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis);
- if (dwc->dis_u3_susphy_quirk)
- reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
-
if (dwc->dis_del_phy_power_chg_quirk)
reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE;
@@ -689,24 +699,15 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
}
/*
- * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to
- * '0' during coreConsultant configuration. So default value will
- * be '0' when the core is reset. Application needs to set it to
- * '1' after the core initialization is completed.
- */
- if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
- reg |= DWC3_GUSB2PHYCFG_SUSPHY;
-
- /*
- * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
- * power-on reset, and it can be set after core initialization, which is
- * after device soft-reset during initialization.
+ * Above DWC_usb3.0 1.94a, it is recommended to set
+ * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
+ * So the default value will be '0' when the core is reset, and the
+ * application needs to set it to '1' after core initialization completes.
+ *
+ * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
+ * after power-on reset, and it can be set after core initialization.
*/
- if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
-
- if (dwc->dis_u2_susphy_quirk)
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
if (dwc->dis_enblslpm_quirk)
reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
@@ -1227,21 +1228,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err_exit_phy;
- if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
- !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
- if (!dwc->dis_u3_susphy_quirk) {
- reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
- reg |= DWC3_GUSB3PIPECTL_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
- }
-
- if (!dwc->dis_u2_susphy_quirk) {
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg |= DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
- }
-
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 7e80dd3d466b..180dd8d29287 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1580,6 +1580,7 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc);
void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
int dwc3_core_soft_reset(struct dwc3 *dwc);
+void dwc3_enable_susphy(struct dwc3 *dwc, bool enable);
#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_host_init(struct dwc3 *dwc);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 72bb722da2f2..d96ffbe52039 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -226,7 +226,8 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
/* reinitialize physical ep1 */
dep = dwc->eps[1];
- dep->flags = DWC3_EP_ENABLED;
+ dep->flags &= DWC3_EP_RESOURCE_ALLOCATED;
+ dep->flags |= DWC3_EP_ENABLED;
/* stall is always issued on EP0 */
dep = dwc->eps[0];
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4df2661f6675..f94f68f1e7d2 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2924,6 +2924,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_ep0_out_start(dwc);
dwc3_gadget_enable_irq(dwc);
+ dwc3_enable_susphy(dwc, true);
return 0;
@@ -4690,6 +4691,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
if (!dwc->gadget)
return;
+ dwc3_enable_susphy(dwc, false);
usb_del_gadget(dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
usb_put_gadget(dwc->gadget);
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 0204787df81d..a171b27a7845 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -10,10 +10,13 @@
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include "../host/xhci-port.h"
#include "../host/xhci-ext-caps.h"
#include "../host/xhci-caps.h"
+#include "../host/xhci-plat.h"
#include "core.h"
#define XHCI_HCSPARAMS1 0x4
@@ -57,6 +60,24 @@ static void dwc3_power_off_all_roothub_ports(struct dwc3 *dwc)
}
}
+static void dwc3_xhci_plat_start(struct usb_hcd *hcd)
+{
+ struct platform_device *pdev;
+ struct dwc3 *dwc;
+
+ if (!usb_hcd_is_primary_hcd(hcd))
+ return;
+
+ pdev = to_platform_device(hcd->self.controller);
+ dwc = dev_get_drvdata(pdev->dev.parent);
+
+ dwc3_enable_susphy(dwc, true);
+}
+
+static const struct xhci_plat_priv dwc3_xhci_plat_quirk = {
+ .plat_start = dwc3_xhci_plat_start,
+};
+
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
@@ -167,6 +188,11 @@ int dwc3_host_init(struct dwc3 *dwc)
}
}
+ ret = platform_device_add_data(xhci, &dwc3_xhci_plat_quirk,
+ sizeof(struct xhci_plat_priv));
+ if (ret)
+ goto err;
+
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
@@ -192,6 +218,7 @@ void dwc3_host_exit(struct dwc3 *dwc)
if (dwc->sys_wakeup)
device_init_wakeup(&dwc->xhci->dev, false);
+ dwc3_enable_susphy(dwc, false);
platform_device_unregister(dwc->xhci);
dwc->xhci = NULL;
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 0ace45b66a31..0e151b54aae8 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2112,7 +2112,7 @@ unknown:
buf[5] = 0x01;
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
- if (w_index != 0x4 || (w_value >> 8))
+ if (w_index != 0x4 || (w_value & 0xff))
break;
buf[6] = w_index;
/* Number of ext compat interfaces */
@@ -2128,9 +2128,9 @@ unknown:
}
break;
case USB_RECIP_INTERFACE:
- if (w_index != 0x5 || (w_value >> 8))
+ if (w_index != 0x5 || (w_value & 0xff))
break;
- interface = w_value & 0xFF;
+ interface = w_value >> 8;
if (interface >= MAX_CONFIG_INTERFACES ||
!os_desc_cfg->interface[interface])
break;
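
Editor's note: the fix above swaps which byte of wValue is validated and which is used. For Microsoft OS 1.0 feature descriptor requests, the interface number travels in the high byte of wValue while the low byte carries the page number (which must be zero here). A standalone illustration of the decoding, not gadget code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t w_value = 0x0300;          /* interface 3, page 0 */

		uint8_t interface = w_value >> 8;   /* high byte: interface number */
		uint8_t page      = w_value & 0xff; /* low byte: page, must be 0 */

		printf("interface=%u page=%u\n", interface, page);
		return 0;
	}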
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index bffbc1dc651f..a057cbedf3c9 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -46,6 +46,8 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
+#define DMABUF_ENQUEUE_TIMEOUT_MS 5000
+
MODULE_IMPORT_NS(DMA_BUF);
/* Reference counter handling */
@@ -850,6 +852,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
work);
int ret = io_data->status;
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+ unsigned long flags;
if (io_data->read && ret > 0) {
kthread_use_mm(io_data->mm);
@@ -862,6 +865,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd);
+ spin_lock_irqsave(&io_data->ffs->eps_lock, flags);
+ usb_ep_free_request(io_data->ep, io_data->req);
+ io_data->req = NULL;
+ spin_unlock_irqrestore(&io_data->ffs->eps_lock, flags);
+
if (io_data->read)
kfree(io_data->to_free);
ffs_free_buffer(io_data);
@@ -875,7 +883,6 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
struct ffs_data *ffs = io_data->ffs;
io_data->status = req->status ? req->status : req->actual;
- usb_ep_free_request(_ep, req);
INIT_WORK(&io_data->work, ffs_user_copy_worker);
queue_work(ffs->io_completion_wq, &io_data->work);
@@ -1578,10 +1585,13 @@ static int ffs_dmabuf_transfer(struct file *file,
struct ffs_dmabuf_priv *priv;
struct ffs_dma_fence *fence;
struct usb_request *usb_req;
+ enum dma_resv_usage resv_dir;
struct dma_buf *dmabuf;
+ unsigned long timeout;
struct ffs_ep *ep;
bool cookie;
u32 seqno;
+ long retl;
int ret;
if (req->flags & ~USB_FFS_DMABUF_TRANSFER_MASK)
@@ -1615,17 +1625,14 @@ static int ffs_dmabuf_transfer(struct file *file,
goto err_attachment_put;
/* Make sure we don't have writers */
- if (!dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_WRITE)) {
- pr_vdebug("FFS WRITE fence is not signaled\n");
- ret = -EBUSY;
- goto err_resv_unlock;
- }
-
- /* If we're writing to the DMABUF, make sure we don't have readers */
- if (epfile->in &&
- !dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_READ)) {
- pr_vdebug("FFS READ fence is not signaled\n");
- ret = -EBUSY;
+ timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
+ retl = dma_resv_wait_timeout(dmabuf->resv,
+ dma_resv_usage_rw(epfile->in),
+ true, timeout);
+ if (retl == 0)
+ retl = -EBUSY;
+ if (retl < 0) {
+ ret = (int)retl;
goto err_resv_unlock;
}
@@ -1665,8 +1672,9 @@ static int ffs_dmabuf_transfer(struct file *file,
dma_fence_init(&fence->base, &ffs_dmabuf_fence_ops,
&priv->lock, priv->context, seqno);
- dma_resv_add_fence(dmabuf->resv, &fence->base,
- dma_resv_usage_rw(epfile->in));
+ resv_dir = epfile->in ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
+
+ dma_resv_add_fence(dmabuf->resv, &fence->base, resv_dir);
dma_resv_unlock(dmabuf->resv);
/* Now that the dma_fence is in place, queue the transfer. */
@@ -3803,7 +3811,7 @@ static int ffs_func_setup(struct usb_function *f,
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
- return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
+ return ffs->ev.setup.wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
static bool ffs_func_req_match(struct usb_function *f,
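
Editor's note: the reservation handling above trades the previous instant -EBUSY for a bounded wait on the buffer's fences. A sketch of the call with the arguments spelled out (dmabuf, epfile and nonblock as in ffs_dmabuf_transfer()):

	timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
	retl = dma_resv_wait_timeout(dmabuf->resv,
				     dma_resv_usage_rw(epfile->in), /* writers, plus readers if we will write */
				     true,                          /* interruptible wait */
				     timeout);                      /* in jiffies; 0 merely tests */
	if (retl == 0)   /* fences still unsignaled when time ran out */
		retl = -EBUSY;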
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 28f4e6552e84..0acc32ed9960 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -878,7 +878,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (alt > 1)
goto fail;
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
DBG(cdev, "reset ncm\n");
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
@@ -1367,7 +1367,7 @@ static void ncm_disable(struct usb_function *f)
DBG(cdev, "ncm deactivated\n");
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
}
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 7e704b2bcfd1..a4377df612f5 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -92,10 +92,10 @@ static int __uvcg_iter_item_entries(const char *page, size_t len,
while (pg - page < len) {
i = 0;
- while (i < sizeof(buf) && (pg - page < len) &&
+ while (i < bufsize && (pg - page < len) &&
*pg != '\0' && *pg != '\n')
buf[i++] = *pg++;
- if (i == sizeof(buf)) {
+ if (i == bufsize) {
ret = -EINVAL;
goto out_free_buf;
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index e82d03224f94..3432ebfae978 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -868,7 +868,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
struct fsl_req *req = container_of(_req, struct fsl_req, req);
- struct fsl_udc *udc;
+ struct fsl_udc *udc = ep->udc;
unsigned long flags;
int ret;
@@ -878,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
dev_vdbg(&udc->gadget.dev, "%s, bad params\n", __func__);
return -EINVAL;
}
- if (unlikely(!_ep || !ep->ep.desc)) {
+ if (unlikely(!ep->ep.desc)) {
dev_vdbg(&udc->gadget.dev, "%s, bad ep\n", __func__);
return -EINVAL;
}
@@ -887,7 +887,6 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
return -EMSGSIZE;
}
- udc = ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 4f9982ecfb58..5cec7640e913 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -888,6 +888,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
/* Check for an all 1's result which is a typical consequence
* of dead, unclocked, or unplugged (CardBus...) devices
*/
+again:
if (ints == ~(u32)0) {
ohci->rh_state = OHCI_RH_HALTED;
ohci_dbg (ohci, "device removed!\n");
@@ -982,6 +983,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
}
spin_unlock(&ohci->lock);
+ /* repeat until all enabled interrupts are handled */
+ if (ohci->rh_state != OHCI_RH_HALTED) {
+ ints = ohci_readl(ohci, &regs->intrstatus);
+ if (ints && (ints & ohci_readl(ohci, &regs->intrenable)))
+ goto again;
+ }
+
return IRQ_HANDLED;
}
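
Editor's note: the new "again" path makes the handler loop until the controller stops asserting enabled interrupts, instead of returning after a single pass and potentially losing an event. Schematically, with read_status(), read_enable() and service() standing in for the MMIO reads and the handler body, and running standing in for the rh_state != OHCI_RH_HALTED check:

	u32 ints = read_status();

	do {
		service(ints);            /* existing handler body */
		ints = read_status();     /* re-sample after handling */
	} while (running && ints && (ints & read_enable()));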
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 2d15386f2c50..6475130eac4b 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -8,7 +8,9 @@
#ifndef _XHCI_PLAT_H
#define _XHCI_PLAT_H
-#include "xhci.h" /* for hcd_to_xhci() */
+struct device;
+struct platform_device;
+struct usb_hcd;
struct xhci_plat_priv {
const char *firmware_name;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 52278afea94b..575f0fd9c9f1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3133,7 +3133,7 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- irqreturn_t ret = IRQ_NONE;
+ irqreturn_t ret = IRQ_HANDLED;
u32 status;
spin_lock(&xhci->lock);
@@ -3141,12 +3141,13 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
status = readl(&xhci->op_regs->status);
if (status == ~(u32)0) {
xhci_hc_died(xhci);
- ret = IRQ_HANDLED;
goto out;
}
- if (!(status & STS_EINT))
+ if (!(status & STS_EINT)) {
+ ret = IRQ_NONE;
goto out;
+ }
if (status & STS_HCE) {
xhci_warn(xhci, "WARNING: Host Controller Error\n");
@@ -3156,7 +3157,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
- ret = IRQ_HANDLED;
goto out;
}
@@ -3167,7 +3167,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
*/
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
- ret = IRQ_HANDLED;
/* This is the handler of the primary interrupter */
xhci_handle_events(xhci, xhci->interrupters[0]);
diff --git a/drivers/usb/host/xhci-rzv2m.c b/drivers/usb/host/xhci-rzv2m.c
index ec65b24eafa8..4f59867d7117 100644
--- a/drivers/usb/host/xhci-rzv2m.c
+++ b/drivers/usb/host/xhci-rzv2m.c
@@ -6,6 +6,7 @@
*/
#include <linux/usb/rzv2m_usb3drd.h>
+#include "xhci.h"
#include "xhci-plat.h"
#include "xhci-rzv2m.h"
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 1740000d54c2..5762564b9d73 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -172,8 +172,7 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__field(void *, vdev)
__field(unsigned long long, out_ctx)
__field(unsigned long long, in_ctx)
- __field(int, hcd_portnum)
- __field(int, hw_portnum)
+ __field(int, slot_id)
__field(u16, current_mel)
),
@@ -181,13 +180,12 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__entry->vdev = vdev;
__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
- __entry->hcd_portnum = (int) vdev->rhub_port->hcd_portnum;
- __entry->hw_portnum = (int) vdev->rhub_port->hw_portnum;
+ __entry->slot_id = (int) vdev->slot_id;
__entry->current_mel = (u16) vdev->current_mel;
),
- TP_printk("vdev %p ctx %llx | %llx hcd_portnum %d hw_portnum %d current_mel %d",
- __entry->vdev, __entry->in_ctx, __entry->out_ctx,
- __entry->hcd_portnum, __entry->hw_portnum, __entry->current_mel
+ TP_printk("vdev %p slot %d ctx %llx | %llx current_mel %d",
+ __entry->vdev, __entry->slot_id, __entry->in_ctx,
+ __entry->out_ctx, __entry->current_mel
)
);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index c6101ed2d9d4..d8049275a023 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -78,7 +78,7 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
if (err) {
dev_err(hub->dev, "failed to enable supplies: %pe\n", ERR_PTR(err));
- return err;
+ goto disable_clk;
}
fsleep(hub->pdata->reset_us);
@@ -87,6 +87,10 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
hub->is_powered_on = true;
return 0;
+
+disable_clk:
+ clk_disable_unprepare(hub->clk);
+ return err;
}
static int onboard_hub_power_off(struct onboard_hub *hub)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 55a65d941ccb..8a5846d4adf6 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EM060K_128 0x0128
+#define QUECTEL_PRODUCT_EM060K_129 0x0129
+#define QUECTEL_PRODUCT_EM060K_12a 0x012a
+#define QUECTEL_PRODUCT_EM060K_12b 0x012b
+#define QUECTEL_PRODUCT_EM060K_12c 0x012c
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
.driver_info = RSVD(4) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
+ .driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
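All of the new option_ids entries above drive behavior through the driver_info bitmasks: RSVD(n) keeps the driver off interface n entirely, while NCTRL(n) marks interface n as not supporting modem-control requests. A sketch of how such per-interface masks work; the bit layout here is an assumption, option.c defines the real RSVD()/NCTRL() macros:

	#define RSVD_SKETCH(ifnum)	(BIT(ifnum) << 0)	/* reserved interface */
	#define NCTRL_SKETCH(ifnum)	(BIT(ifnum) << 8)	/* no modem control */

	static int probe_sketch(unsigned long device_flags, u8 ifnum)
	{
		if (device_flags & RSVD_SKETCH(ifnum))
			return -ENODEV;	/* leave it to e.g. the net driver */
		return 0;
	}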
diff --git a/drivers/usb/typec/mux/it5205.c b/drivers/usb/typec/mux/it5205.c
index 5535932e42cd..4357cc67a867 100644
--- a/drivers/usb/typec/mux/it5205.c
+++ b/drivers/usb/typec/mux/it5205.c
@@ -22,7 +22,7 @@
#include <linux/usb/typec_mux.h>
#define IT5205_REG_CHIP_ID(x) (0x4 + (x))
-#define IT5205FN_CHIP_ID 0x35323035 /* "5205" */
+#define IT5205FN_CHIP_ID 0x35303235 /* "5025", i.e. "5205" with byte order reversed */
/* MUX power down register */
#define IT5205_REG_MUXPDR 0x10
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index e48412cdcb0f..d3958c061a97 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -104,14 +104,18 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
ret = tcpm->port_start(tcpm, tcpm->tcpm_port);
if (ret)
- goto fwnode_remove;
+ goto port_unregister;
ret = tcpm->pdphy_start(tcpm, tcpm->tcpm_port);
if (ret)
- goto fwnode_remove;
+ goto port_stop;
return 0;
+port_stop:
+ tcpm->port_stop(tcpm);
+port_unregister:
+ tcpm_unregister_port(tcpm->tcpm_port);
fwnode_remove:
fwnode_remove_software_node(tcpm->tcpc.fwnode);
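The probe fix above replaces two jumps to a shared label with an unwind ladder: each failure point jumps to a label that tears down exactly what the preceding steps acquired, in reverse order. The idiom in isolation, with placeholder step names:

	static int step_a(void) { return 0; }		/* e.g. port registration */
	static int step_b(void) { return -EIO; }	/* e.g. port_start() */
	static void undo_step_a(void) { }

	static int probe_sketch(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			return ret;

		ret = step_b();
		if (ret)
			goto undo_a;	/* unwind only step_a */

		return 0;

	undo_a:
		undo_step_a();
		return ret;
	}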
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
index 6560f4fc98d5..5b7f52b74a40 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -475,10 +475,8 @@ static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdph
qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
done:
- if (ret) {
- regulator_disable(pmic_typec_pdphy->vdd_pdphy);
+ if (ret)
dev_err(dev, "pdphy_enable fail %d\n", ret);
- }
return ret;
}
@@ -524,12 +522,17 @@ static int qcom_pmic_typec_pdphy_start(struct pmic_typec *tcpm,
ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
if (ret)
- return ret;
+ goto err_disable_vdd_pdphy;
for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
enable_irq(pmic_typec_pdphy->irq_data[i].irq);
return 0;
+
+err_disable_vdd_pdphy:
+ regulator_disable(pmic_typec_pdphy->vdd_pdphy);
+
+ return ret;
}
static void qcom_pmic_typec_pdphy_stop(struct pmic_typec *tcpm)
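The two pdphy hunks restore the regulator enable/disable balance: the enable helper no longer disables vdd_pdphy on its shared error label, and the start path that enabled the supply now disables it when the reset step fails. The rule, sketched with a placeholder failure:

	/* Sketch: regulator_enable()/regulator_disable() stay at the same
	 * call level; the forced -EIO stands in for a failing reset step.
	 */
	static int start_sketch(struct regulator *reg)
	{
		int ret;

		ret = regulator_enable(reg);
		if (ret)
			return ret;

		ret = -EIO;	/* stand-in for the reset step failing */
		if (ret)
			goto err_disable;

		return 0;

	err_disable:
		regulator_disable(reg);	/* pairs with the enable above */
		return ret;
	}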
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index c26fb70c3ec6..8a1af08f71b6 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1564,8 +1564,12 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
{
+ if (port->state != SRC_READY && port->state != SNK_READY &&
+ port->state != SRC_VDM_IDENTITY_REQUEST)
+ return;
+
mutex_lock(&port->lock);
- tcpm_queue_vdm(port, header, data, cnt, TCPC_TX_SOP);
+ tcpm_queue_vdm(port, header, data, cnt, tx_sop_type);
mutex_unlock(&port->lock);
}
@@ -1580,7 +1584,8 @@ static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
port->partner_ident.product = product;
- typec_partner_set_identity(port->partner);
+ if (port->partner)
+ typec_partner_set_identity(port->partner);
tcpm_log(port, "Identity: %04x:%04x.%04x",
PD_IDH_VID(vdo),
@@ -1742,6 +1747,9 @@ static void tcpm_register_partner_altmodes(struct tcpm_port *port)
struct typec_altmode *altmode;
int i;
+ if (!port->partner)
+ return;
+
for (i = 0; i < modep->altmodes; i++) {
altmode = typec_partner_register_altmode(port->partner,
&modep->altmode_desc[i]);
@@ -2996,7 +3004,7 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
{
struct usb_power_delivery_desc desc = { port->negotiated_rev };
struct usb_power_delivery_capabilities_desc caps = { };
- struct usb_power_delivery_capabilities *cap;
+ struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
if (!port->partner_pd)
port->partner_pd = usb_power_delivery_register(NULL, &desc);
@@ -3006,6 +3014,9 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
caps.role = TYPEC_SOURCE;
+ if (cap)
+ usb_power_delivery_unregister_capabilities(cap);
+
cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
if (IS_ERR(cap))
return PTR_ERR(cap);
@@ -4231,7 +4242,10 @@ static int tcpm_init_vconn(struct tcpm_port *port)
static void tcpm_typec_connect(struct tcpm_port *port)
{
+ struct typec_partner *partner;
+
if (!port->connected) {
+ port->connected = true;
/* Make sure we don't report stale identity information */
memset(&port->partner_ident, 0, sizeof(port->partner_ident));
port->partner_desc.usb_pd = port->pd_capable;
@@ -4241,9 +4255,13 @@ static void tcpm_typec_connect(struct tcpm_port *port)
port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
else
port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
- port->partner = typec_register_partner(port->typec_port,
- &port->partner_desc);
- port->connected = true;
+ partner = typec_register_partner(port->typec_port, &port->partner_desc);
+ if (IS_ERR(partner)) {
+ dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner));
+ return;
+ }
+
+ port->partner = partner;
typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
}
}
@@ -4323,9 +4341,11 @@ static void tcpm_typec_disconnect(struct tcpm_port *port)
port->plug_prime = NULL;
port->cable = NULL;
if (port->connected) {
- typec_partner_set_usb_power_delivery(port->partner, NULL);
- typec_unregister_partner(port->partner);
- port->partner = NULL;
+ if (port->partner) {
+ typec_partner_set_usb_power_delivery(port->partner, NULL);
+ typec_unregister_partner(port->partner);
+ port->partner = NULL;
+ }
port->connected = false;
}
}
@@ -4549,6 +4569,9 @@ static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
{
+ if (!port->partner)
+ return;
+
switch (port->negotiated_rev) {
case PD_REV30:
break;
@@ -5605,6 +5628,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case PORT_RESET:
tcpm_reset_port(port);
+ port->pd_events = 0;
if (port->self_powered)
tcpm_set_cc(port, TYPEC_CC_OPEN);
else
@@ -6855,14 +6879,14 @@ static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
if (data->sink_desc.pdo[0]) {
for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
port->snk_pdo[i] = data->sink_desc.pdo[i];
- port->nr_snk_pdo = i + 1;
+ port->nr_snk_pdo = i;
port->operating_snk_mw = data->operating_snk_mw;
}
if (data->source_desc.pdo[0]) {
for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
port->src_pdo[i] = data->source_desc.pdo[i];
- port->nr_src_pdo = i + 1;
+ port->nr_src_pdo = i;
}
switch (port->state) {
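Among the tcpm.c changes, the nr_snk_pdo/nr_src_pdo lines fix an off-by-one: after "for (i = 0; i < PDO_MAX_OBJECTS && pdo[i]; i++)", the index i already equals the number of entries copied, so the old "i + 1" overcounted. A tiny worked example:

	static int count_sketch(void)
	{
		u32 pdo[4] = { 0x0001912c, 0x0002d12c, 0, 0 };	/* two valid PDOs */
		int i;

		for (i = 0; i < 4 && pdo[i]; i++)
			;	/* the copy would happen here */

		return i;	/* 2; "i + 1" would have claimed 3 */
	}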
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 31d8a46ae5e7..bd6ae92aa39e 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1736,11 +1736,13 @@ static int ucsi_init(struct ucsi *ucsi)
ucsi->connector = connector;
ucsi->ntfy = ntfy;
+ mutex_lock(&ucsi->ppm_lock);
ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ mutex_unlock(&ucsi->ppm_lock);
if (ret)
return ret;
- if (UCSI_CCI_CONNECTOR(READ_ONCE(cci)))
- ucsi_connector_change(ucsi, cci);
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
return 0;
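Two things change in ucsi_init(): the CCI read is serialized under ppm_lock, and ucsi_connector_change() now receives the decoded connector number rather than the raw CCI word. Assuming UCSI_CCI_CONNECTOR() is the usual mask-and-shift over the CCI register (an assumption; the real macro lives in ucsi.h), the decode looks like:

	/* Hypothetical decode; mask/shift inferred from the macro's use. */
	#define CCI_CONNECTOR_SKETCH(cci)	(((cci) & GENMASK(7, 1)) >> 1)

	static u8 connector_from_cci(u32 cci)
	{
		return CCI_CONNECTOR_SKETCH(cci);	/* 0 means no change */
	}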
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index b246067e074b..6cb96a1e8b7d 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -967,7 +967,7 @@ vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features,
val_u32 = __virtio32_to_cpu(true, config->size_max);
- return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, val_u32);
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, val_u32);
}
/* fill the block size*/
@@ -1089,7 +1089,7 @@ static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features)
u8 ro;
ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 0 : 1;
- if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, ro))
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_READ_ONLY, ro))
return -EMSGSIZE;
return 0;
@@ -1100,7 +1100,7 @@ static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features)
u8 flush;
flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1;
- if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_FLUSH, flush))
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_FLUSH, flush))
return -EMSGSIZE;
return 0;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index cb5b7f865d58..e727941f589d 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -71,6 +71,8 @@ static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+ case PCI_DEVICE_ID_INTEL_DSA_SPR0:
+ case PCI_DEVICE_ID_INTEL_IAX_SPR0:
return true;
default:
return false;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c64ded183f8d..f16279351db5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -380,7 +380,7 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net,
}
}
-static void vhost_zerocopy_callback(struct sk_buff *skb,
+static void vhost_zerocopy_complete(struct sk_buff *skb,
struct ubuf_info *ubuf_base, bool success)
{
struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
@@ -408,6 +408,10 @@ static void vhost_zerocopy_callback(struct sk_buff *skb,
rcu_read_unlock_bh();
}
+static const struct ubuf_info_ops vhost_ubuf_ops = {
+ .complete = vhost_zerocopy_complete,
+};
+
static inline unsigned long busy_clock(void)
{
return local_clock() >> 10;
@@ -879,7 +883,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->ctx = nvq->ubufs;
ubuf->desc = nvq->upend_idx;
- ubuf->ubuf.callback = vhost_zerocopy_callback;
+ ubuf->ubuf.ops = &vhost_ubuf_ops;
ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
refcount_set(&ubuf->ubuf.refcnt, 1);
msg.msg_control = &ctl;
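The vhost/net hunks convert a per-buffer callback pointer into a single shared const ops table: every ubuf_info now references one static vtable instead of storing its own function pointer. The shape of that conversion, with sketch types standing in for the real ubuf_info_ops:

	/* Sketch types; the real ubuf_info_ops is defined in the net headers. */
	struct ubuf_ops_sketch {
		void (*complete)(void *ctx, bool success);
	};

	static void complete_sketch(void *ctx, bool success) { }

	/* One table shared by all buffers, instead of a pointer per buffer. */
	static const struct ubuf_ops_sketch shared_ops = {
		.complete = complete_sketch,
	};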
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 045f666b4f12..8995730ce0bf 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2515,7 +2515,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
- vq_err(vq, "Guest moved used index from %u to %u",
+ vq_err(vq, "Guest moved avail index from %u to %u",
last_avail_idx, vq->avail_idx);
return -EFAULT;
}
@@ -2799,9 +2799,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
r = vhost_get_avail_idx(vq, &avail_idx);
if (unlikely(r))
return false;
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return false;
+ }
- return vq->avail_idx == vq->last_avail_idx;
+ return true;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
@@ -2838,9 +2848,19 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
&vq->avail->idx, r);
return false;
}
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return true;
+ }
- return vq->avail_idx != vq->last_avail_idx;
+ return false;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
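Both vhost hunks encode the same ordering rule: once the CPU observes that avail_idx has advanced, the later reads of the ring entries must not be reordered before that index load, hence the smp_rmb() between them (the guest pairs it with a write barrier before publishing the index). Condensed reader-side sketch:

	/* idx/ring/last are placeholders; 256 is an arbitrary ring size. */
	static u16 peek_sketch(const u16 *idx, const u16 *ring, u16 last)
	{
		u16 avail = READ_ONCE(*idx);

		if (avail == last)
			return last;		/* nothing new */

		smp_rmb();			/* entry reads stay after idx read */
		return READ_ONCE(ring[last % 256]);
	}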
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index dae96c9f61cf..806ecd32219b 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -196,7 +196,7 @@ err_mutex_unlock:
*/
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
- unsigned long offset = vmf->address - vmf->vma->vm_start;
+ unsigned long offset = vmf->pgoff << PAGE_SHIFT;
struct page *page = vmf->page;
file_update_time(vmf->vma->vm_file);
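The fb_defio fix derives the framebuffer offset from vmf->pgoff, the page offset within the mapped object, instead of vmf->address - vma->vm_start, which is only the offset within the VMA. The two agree only when userspace mapped the buffer from offset zero; a worked example where they differ:

	/* Suppose mmap() was called with offset = 2 * PAGE_SIZE, so the VMA
	 * starts two pages into the framebuffer and the fault hits its
	 * first mapped page:
	 *
	 *   address - vm_start       == 0          (old code: wrong page)
	 *   vmf->pgoff << PAGE_SHIFT == 2 * 4096   (new code: correct)
	 */
	static unsigned long fault_offset_sketch(unsigned long pgoff)
	{
		return pgoff << PAGE_SHIFT;
	}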
diff --git a/drivers/virt/acrn/ioreq.c b/drivers/virt/acrn/ioreq.c
index 29e1ef1915fd..e94358239a4b 100644
--- a/drivers/virt/acrn/ioreq.c
+++ b/drivers/virt/acrn/ioreq.c
@@ -433,7 +433,7 @@ struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
client->priv = priv;
client->is_default = is_default;
if (name)
- strncpy(client->name, name, sizeof(client->name) - 1);
+ strscpy(client->name, name);
rwlock_init(&client->range_lock);
INIT_LIST_HEAD(&client->range_list);
init_waitqueue_head(&client->wq);
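The acrn change is the standard strncpy() to strscpy() conversion: strscpy() always NUL-terminates, and in its two-argument form sizes the copy from the destination array, dropping the manual sizeof() - 1 and termination bookkeeping. Side by side:

	static void copy_sketch(const char *name)
	{
		char buf[16];

		/* old: caller reserves the NUL and terminates by hand */
		strncpy(buf, name, sizeof(buf) - 1);
		buf[sizeof(buf) - 1] = '\0';

		/* new: size inferred from the array, always terminated */
		strscpy(buf, name);
	}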
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 87f241825bc3..654290a8e1ba 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -59,7 +59,7 @@ struct snp_guest_dev {
*/
struct snp_guest_msg secret_request, secret_response;
- struct snp_secrets_page_layout *layout;
+ struct snp_secrets_page *secrets;
struct snp_req_data input;
union {
struct snp_report_req report;
@@ -743,26 +743,26 @@ static const struct file_operations snp_guest_fops = {
.unlocked_ioctl = snp_guest_ioctl,
};
-static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno)
+static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
{
u8 *key = NULL;
switch (id) {
case 0:
- *seqno = &layout->os_area.msg_seqno_0;
- key = layout->vmpck0;
+ *seqno = &secrets->os_area.msg_seqno_0;
+ key = secrets->vmpck0;
break;
case 1:
- *seqno = &layout->os_area.msg_seqno_1;
- key = layout->vmpck1;
+ *seqno = &secrets->os_area.msg_seqno_1;
+ key = secrets->vmpck1;
break;
case 2:
- *seqno = &layout->os_area.msg_seqno_2;
- key = layout->vmpck2;
+ *seqno = &secrets->os_area.msg_seqno_2;
+ key = secrets->vmpck2;
break;
case 3:
- *seqno = &layout->os_area.msg_seqno_3;
- key = layout->vmpck3;
+ *seqno = &secrets->os_area.msg_seqno_3;
+ key = secrets->vmpck3;
break;
default:
break;
@@ -897,8 +897,8 @@ static void unregister_sev_tsm(void *data)
static int __init sev_guest_probe(struct platform_device *pdev)
{
- struct snp_secrets_page_layout *layout;
struct sev_guest_platform_data *data;
+ struct snp_secrets_page *secrets;
struct device *dev = &pdev->dev;
struct snp_guest_dev *snp_dev;
struct miscdevice *misc;
@@ -916,7 +916,7 @@ static int __init sev_guest_probe(struct platform_device *pdev)
if (!mapping)
return -ENODEV;
- layout = (__force void *)mapping;
+ secrets = (__force void *)mapping;
ret = -ENOMEM;
snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
@@ -924,7 +924,7 @@ static int __init sev_guest_probe(struct platform_device *pdev)
goto e_unmap;
ret = -EINVAL;
- snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno);
+ snp_dev->vmpck = get_vmpck(vmpck_id, secrets, &snp_dev->os_area_msg_seqno);
if (!snp_dev->vmpck) {
dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
goto e_unmap;
@@ -938,7 +938,7 @@ static int __init sev_guest_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, snp_dev);
snp_dev->dev = dev;
- snp_dev->layout = layout;
+ snp_dev->secrets = secrets;
/* Allocate the shared page used for the request and response message. */
snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
diff --git a/drivers/virt/vmgenid.c b/drivers/virt/vmgenid.c
index b67a28da4702..eba4250ab3cf 100644
--- a/drivers/virt/vmgenid.c
+++ b/drivers/virt/vmgenid.c
@@ -68,7 +68,6 @@ out:
static void vmgenid_notify(struct acpi_device *device, u32 event)
{
struct vmgenid_state *state = acpi_driver_data(device);
- char *envp[] = { "NEW_VMGENID=1", NULL };
u8 old_id[VMGENID_SIZE];
memcpy(old_id, state->this_id, sizeof(old_id));
@@ -76,7 +75,6 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
if (!memcmp(old_id, state->this_id, sizeof(old_id)))
return;
add_vmfork_randomness(state->this_id, sizeof(state->this_id));
- kobject_uevent_env(&device->dev.kobj, KOBJ_CHANGE, envp);
}
static const struct acpi_device_id vmgenid_ids[] = {
@@ -88,7 +86,6 @@ static const struct acpi_device_id vmgenid_ids[] = {
static struct acpi_driver vmgenid_driver = {
.name = "vmgenid",
.ids = vmgenid_ids,
- .owner = THIS_MODULE,
.ops = {
.add = vmgenid_add,
.notify = vmgenid_notify
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index f173587893cb..9510c551dce8 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -362,14 +362,16 @@ static const struct bus_type virtio_bus = {
.remove = virtio_dev_remove,
};
-int register_virtio_driver(struct virtio_driver *driver)
+int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
{
/* Catch this early. */
BUG_ON(driver->feature_table_size && !driver->feature_table);
driver->driver.bus = &virtio_bus;
+ driver->driver.owner = owner;
+
return driver_register(&driver->driver);
}
-EXPORT_SYMBOL_GPL(register_virtio_driver);
+EXPORT_SYMBOL_GPL(__register_virtio_driver);
void unregister_virtio_driver(struct virtio_driver *driver)
{
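The virtio rename follows the common kernel pattern for capturing the caller's module: the exported function gains an owner parameter, and a header macro passes THIS_MODULE on the caller's behalf. The pairing macro would look like the following (the real one lives in linux/virtio.h; shown here as the expected shape, not quoted from it):

	#define register_virtio_driver(drv) \
		__register_virtio_driver(drv, THIS_MODULE)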
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 1b9928648583..2196474ce6ef 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -117,17 +117,13 @@ static struct resource __init *zorro_find_parent_resource(
int i;
for (i = 0; i < bridge->num_resources; i++) {
- struct resource *r = &bridge->resource[i];
-
- if (zorro_resource_start(z) >= r->start &&
- zorro_resource_end(z) <= r->end)
- return r;
+ if (resource_contains(&bridge->resource[i], &z->resource))
+ return &bridge->resource[i];
}
+
return &iomem_resource;
}
-
-
static int __init amiga_zorro_probe(struct platform_device *pdev)
{
struct zorro_bus *bus;
@@ -176,9 +172,7 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
z->slotsize = zi->slotsize;
sprintf(z->name, "Zorro device %08x", z->id);
zorro_name_device(z);
- z->resource.start = zi->boardaddr;
- z->resource.end = zi->boardaddr + zi->boardsize - 1;
- z->resource.name = z->name;
+ z->resource = DEFINE_RES_MEM_NAMED(zi->boardaddr, zi->boardsize, z->name);
r = zorro_find_parent_resource(pdev, z);
error = request_resource(r, &z->resource);
if (error && !(z->rom.er_Type & ERTF_MEMLIST))
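Both zorro changes swap open-coded range logic for ioport.h helpers: resource_contains() replaces the manual start/end comparisons, and DEFINE_RES_MEM_NAMED() builds the resource in one initializer, covering [start, start + size - 1] with IORESOURCE_MEM set. The equivalence, sketched with arbitrary example values:

	/* 0xe90000 and 0x10000 are illustrative, not real board values. */
	struct resource r1 = DEFINE_RES_MEM_NAMED(0xe90000, 0x10000, "sketch");

	struct resource r2 = {
		.start = 0xe90000,
		.end   = 0xe90000 + 0x10000 - 1,
		.name  = "sketch",
		.flags = IORESOURCE_MEM,
	};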