Diffstat (limited to 'drivers'); each entry lists file mode, file path, and the number of lines changed
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c3
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h2
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c8
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h5
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx.c27
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx_reg.h2
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c11
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.c3
-rw-r--r--drivers/acpi/acpi_video.c7
-rw-r--r--drivers/acpi/nfit/core.c2
-rw-r--r--drivers/acpi/processor_idle.c3
-rw-r--r--drivers/acpi/processor_pdc.c1
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/thermal.c41
-rw-r--r--drivers/ata/ahci.c13
-rw-r--r--drivers/ata/ahci_ceva.c2
-rw-r--r--drivers/ata/ahci_dwc.c2
-rw-r--r--drivers/ata/ahci_mtk.c1
-rw-r--r--drivers/ata/ahci_mvebu.c2
-rw-r--r--drivers/ata/ahci_octeon.c4
-rw-r--r--drivers/ata/ahci_qoriq.c4
-rw-r--r--drivers/ata/ahci_seattle.c4
-rw-r--r--drivers/ata/ahci_sunxi.c2
-rw-r--r--drivers/ata/ahci_tegra.c5
-rw-r--r--drivers/ata/ahci_xgene.c16
-rw-r--r--drivers/ata/libahci.c42
-rw-r--r--drivers/ata/libahci_platform.c1
-rw-r--r--drivers/ata/libata-core.c511
-rw-r--r--drivers/ata/libata-eh.c222
-rw-r--r--drivers/ata/libata-sata.c131
-rw-r--r--drivers/ata/libata-scsi.c263
-rw-r--r--drivers/ata/libata-sff.c32
-rw-r--r--drivers/ata/libata-transport.c9
-rw-r--r--drivers/ata/libata.h7
-rw-r--r--drivers/ata/pata_arasan_cf.c6
-rw-r--r--drivers/ata/pata_buddha.c1
-rw-r--r--drivers/ata/pata_ep93xx.c25
-rw-r--r--drivers/ata/pata_falcon.c63
-rw-r--r--drivers/ata/pata_ftide010.c16
-rw-r--r--drivers/ata/pata_gayle.c1
-rw-r--r--drivers/ata/pata_imx.c37
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c24
-rw-r--r--drivers/ata/pata_mpc52xx.c10
-rw-r--r--drivers/ata/pata_parport/comm.c9
-rw-r--r--drivers/ata/pata_pxa.c6
-rw-r--r--drivers/ata/pata_rb532_cf.c6
-rw-r--r--drivers/ata/pata_sl82c105.c3
-rw-r--r--drivers/ata/sata_dwc_460ex.c8
-rw-r--r--drivers/ata/sata_fsl.c12
-rw-r--r--drivers/ata/sata_gemini.c10
-rw-r--r--drivers/ata/sata_highbank.c4
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_mv.c11
-rw-r--r--drivers/ata/sata_nv.c2
-rw-r--r--drivers/ata/sata_rcar.c17
-rw-r--r--drivers/ata/sata_sil24.c4
-rw-r--r--drivers/ata/sata_sx4.c1
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/regmap/regcache-rbtree.c3
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/nbd.c3
-rw-r--r--drivers/block/null_blk/main.c12
-rw-r--r--drivers/block/rbd.c416
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/bus/ti-sysc.c31
-rw-r--r--drivers/cache/Kconfig11
-rw-r--r--drivers/cache/Makefile3
-rw-r--r--drivers/cache/ax45mp_cache.c213
-rw-r--r--drivers/char/agp/parisc-agp.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c2
-rw-r--r--drivers/char/tpm/tpm_crb.c38
-rw-r--r--drivers/clk/clk-si521xx.c9
-rw-r--r--drivers/clk/clk-versaclock3.c81
-rw-r--r--drivers/clk/sprd/ums512-clk.c2
-rw-r--r--drivers/clk/tegra/clk-bpmp.c2
-rw-r--r--drivers/clocksource/Kconfig7
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c7
-rw-r--r--drivers/clocksource/hyperv_timer.c2
-rw-r--r--drivers/clocksource/timer-loongson1-pwm.c2
-rw-r--r--drivers/clocksource/timer-oxnas-rps.c288
-rw-r--r--drivers/clocksource/timer-sun5i.c288
-rw-r--r--drivers/comedi/Kconfig103
-rw-r--r--drivers/counter/Kconfig2
-rw-r--r--drivers/cpufreq/cpufreq.c53
-rw-r--r--drivers/cpufreq/cpufreq_governor.c4
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c2
-rw-r--r--drivers/cxl/acpi.c4
-rw-r--r--drivers/cxl/core/mbox.c23
-rw-r--r--drivers/cxl/core/port.c13
-rw-r--r--drivers/cxl/core/region.c41
-rw-r--r--drivers/cxl/pci.c7
-rw-r--r--drivers/dma/Kconfig21
-rw-r--r--drivers/dma/Makefile7
-rw-r--r--drivers/dma/apple-admac.c3
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/bcm-sba-raid.c4
-rw-r--r--drivers/dma/bestcomm/bestcomm.c3
-rw-r--r--drivers/dma/dma-jz4780.c1
-rw-r--r--drivers/dma/dmaengine.c82
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c1
-rw-r--r--drivers/dma/dw/rzn1-dmamux.c4
-rw-r--r--drivers/dma/ep93xx_dma.c4
-rw-r--r--drivers/dma/fsl-edma-common.c307
-rw-r--r--drivers/dma/fsl-edma-common.h127
-rw-r--r--drivers/dma/fsl-edma-main.c (renamed from drivers/dma/fsl-edma.c)313
-rw-r--r--drivers/dma/fsl-qdma.c4
-rw-r--r--drivers/dma/fsl_raid.c3
-rw-r--r--drivers/dma/fsldma.c3
-rw-r--r--drivers/dma/idxd/device.c41
-rw-r--r--drivers/dma/idxd/dma.c5
-rw-r--r--drivers/dma/idxd/idxd.h14
-rw-r--r--drivers/dma/idxd/init.c54
-rw-r--r--drivers/dma/idxd/perfmon.c7
-rw-r--r--drivers/dma/idxd/sysfs.c40
-rw-r--r--drivers/dma/img-mdc-dma.c1
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/imx-sdma.c1
-rw-r--r--drivers/dma/ioat/dca.c2
-rw-r--r--drivers/dma/ioat/dma.h1
-rw-r--r--drivers/dma/ioat/init.c19
-rw-r--r--drivers/dma/ipu/Makefile2
-rw-r--r--drivers/dma/ipu/ipu_idmac.c1801
-rw-r--r--drivers/dma/ipu/ipu_intern.h173
-rw-r--r--drivers/dma/ipu/ipu_irq.c367
-rw-r--r--drivers/dma/lgm/lgm-dma.c7
-rw-r--r--drivers/dma/lpc18xx-dmamux.c4
-rw-r--r--drivers/dma/mcf-edma-main.c (renamed from drivers/dma/mcf-edma.c)43
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c1
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c1
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c2
-rw-r--r--drivers/dma/mpc512x_dma.c4
-rw-r--r--drivers/dma/mxs-dma.c1
-rw-r--r--drivers/dma/nbpfaxi.c1
-rw-r--r--drivers/dma/owl-dma.c5
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/dma/qcom/gpi.c3
-rw-r--r--drivers/dma/qcom/hidma.c14
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c5
-rw-r--r--drivers/dma/sh/rz-dmac.c17
-rw-r--r--drivers/dma/sh/shdmac.c8
-rw-r--r--drivers/dma/sprd-dma.c2
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/dma/stm32-dma.c3
-rw-r--r--drivers/dma/stm32-dmamux.c4
-rw-r--r--drivers/dma/stm32-mdma.c1
-rw-r--r--drivers/dma/sun6i-dma.c2
-rw-r--r--drivers/dma/tegra186-gpc-dma.c2
-rw-r--r--drivers/dma/tegra20-apb-dma.c1
-rw-r--r--drivers/dma/tegra210-adma.c3
-rw-r--r--drivers/dma/ti/dma-crossbar.c5
-rw-r--r--drivers/dma/ti/edma.c1
-rw-r--r--drivers/dma/ti/k3-udma-glue.c3
-rw-r--r--drivers/dma/ti/k3-udma-private.c2
-rw-r--r--drivers/dma/ti/k3-udma.c1
-rw-r--r--drivers/dma/ti/omap-dma.c2
-rw-r--r--drivers/dma/xgene-dma.c3
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c74
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c3
-rw-r--r--drivers/firewire/core-device.c2
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/sbp2.c9
-rw-r--r--drivers/firmware/arm_ffa/driver.c16
-rw-r--r--drivers/firmware/arm_scmi/perf.c4
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c34
-rw-r--r--drivers/firmware/efi/efi.c32
-rw-r--r--drivers/firmware/efi/libstub/Makefile4
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c117
-rw-r--r--drivers/firmware/efi/libstub/efistub.h8
-rw-r--r--drivers/firmware/efi/libstub/kaslr.c159
-rw-r--r--drivers/firmware/efi/libstub/riscv-stub.c33
-rw-r--r--drivers/firmware/efi/libstub/unaccepted_memory.c2
-rw-r--r--drivers/firmware/imx/imx-dsp.c1
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c1
-rw-r--r--drivers/gpio/gpio-pxa.c1
-rw-r--r--drivers/gpio/gpio-sim.c60
-rw-r--r--drivers/gpio/gpio-tb10x.c6
-rw-r--r--drivers/gpio/gpio-timberdale.c5
-rw-r--r--drivers/gpio/gpio-zynq.c26
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c34
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c46
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c46
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c77
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c141
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c9
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h18
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h38
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h9
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c41
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c56
-rw-r--r--drivers/gpu/drm/ci/arm.config69
-rw-r--r--drivers/gpu/drm/ci/arm64.config199
-rw-r--r--drivers/gpu/drm/ci/build-igt.sh35
-rw-r--r--drivers/gpu/drm/ci/build.sh157
-rw-r--r--drivers/gpu/drm/ci/build.yml110
-rwxr-xr-xdrivers/gpu/drm/ci/check-patch.py57
-rw-r--r--drivers/gpu/drm/ci/container.yml65
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml251
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh77
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml15
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh57
-rw-r--r--drivers/gpu/drm/ci/static-checks.yml12
-rw-r--r--drivers/gpu/drm/ci/test.yml335
-rw-r--r--drivers/gpu/drm/ci/testlist.txt2912
-rw-r--r--drivers/gpu/drm/ci/x86_64.config111
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt19
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt58
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt38
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt19
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt41
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt37
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-flakes.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt48
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt29
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt0
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt12
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-flakes.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt15
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-flakes.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt68
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt48
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt52
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt37
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt38
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-flakes.txt0
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt6
-rw-r--r--drivers/gpu/drm/drm_connector.c2
-rw-r--r--drivers/gpu/drm/drm_exec.c2
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c11
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c41
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c102
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c120
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c9
-rw-r--r--drivers/gpu/drm/i915/i915_request.c7
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c2
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c2
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c2
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c10
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_submit.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h4
-rw-r--r--drivers/hid/hid-multitouch.c10
-rw-r--r--drivers/hid/hid-rmi.c10
-rw-r--r--drivers/hid/usbhid/hid-core.c11
-rw-r--r--drivers/hv/connection.c16
-rw-r--r--drivers/hv/hv.c131
-rw-r--r--drivers/hv/hv_balloon.c82
-rw-r--r--drivers/hv/hv_common.c48
-rw-r--r--drivers/hv/hyperv_vmbus.h11
-rw-r--r--drivers/hv/vmbus_drv.c3
-rw-r--r--drivers/hwmon/nct6775-core.c4
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c8
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c11
-rw-r--r--drivers/hwspinlock/u8500_hsem.c6
-rw-r--r--drivers/i2c/Kconfig2
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c11
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c7
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c18
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c7
-rw-r--r--drivers/i2c/busses/i2c-au1550.c15
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c20
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c16
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c27
-rw-r--r--drivers/i2c/busses/i2c-cadence.c1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-davinci.c16
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c17
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h3
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c12
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c22
-rw-r--r--drivers/i2c/busses/i2c-dln2.c6
-rw-r--r--drivers/i2c/busses/i2c-emev2.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c12
-rw-r--r--drivers/i2c/busses/i2c-gxp.c3
-rw-r--r--drivers/i2c/busses/i2c-hisi.c12
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c10
-rw-r--r--drivers/i2c/busses/i2c-i801.c7
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c3
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c10
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c23
-rw-r--r--drivers/i2c/busses/i2c-imx.c10
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c2
-rw-r--r--drivers/i2c/busses/i2c-kempld.c19
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c9
-rw-r--r--drivers/i2c/busses/i2c-meson.c1
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c5
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c60
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c10
-rw-r--r--drivers/i2c/busses/i2c-mpc.c3
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c12
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c3
-rw-r--r--drivers/i2c/busses/i2c-mxs.c1
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c4
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c12
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c18
-rw-r--r--drivers/i2c/busses/i2c-ocores.c10
-rw-r--r--drivers/i2c/busses/i2c-owl.c3
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c1
-rw-r--r--drivers/i2c/busses/i2c-pnx.c15
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c1
-rw-r--r--drivers/i2c/busses/i2c-pxa.c20
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c8
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c4
-rw-r--r--drivers/i2c/busses/i2c-qup.c16
-rw-r--r--drivers/i2c/busses/i2c-rcar.c12
-rw-r--r--drivers/i2c/busses/i2c-riic.c1
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c20
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c22
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c20
-rw-r--r--drivers/i2c/busses/i2c-sprd.c1
-rw-r--r--drivers/i2c/busses/i2c-st.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c8
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c28
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c4
-rw-r--r--drivers/i2c/busses/i2c-virtio.c8
-rw-r--r--drivers/i2c/busses/i2c-xiic.c2
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c6
-rw-r--r--drivers/i2c/i2c-mux.c2
-rw-r--r--drivers/i2c/muxes/Kconfig6
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c101
-rw-r--r--drivers/i3c/master.c6
-rw-r--r--drivers/i3c/master/ast2600-i3c-master.c1
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c1
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c2
-rw-r--r--drivers/i3c/master/svc-i3c-master.c19
-rw-r--r--drivers/infiniband/core/cache.c11
-rw-r--r--drivers/infiniband/core/cma.c34
-rw-r--r--drivers/infiniband/core/cma_configfs.c2
-rw-r--r--drivers/infiniband/core/iwpm_util.c2
-rw-r--r--drivers/infiniband/core/netlink.c2
-rw-r--r--drivers/infiniband/core/nldev.c1
-rw-r--r--drivers/infiniband/core/uverbs_main.c37
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c2
-rw-r--r--drivers/infiniband/core/verbs.c109
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h35
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c84
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h55
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c259
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c277
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c47
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c77
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c38
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h23
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c85
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h13
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c8
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h10
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c24
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h18
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c2
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c433
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h36
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile1
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c8
-rw-r--r--drivers/infiniband/hw/hfi1/device.c72
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h4
-rw-r--r--drivers/infiniband/hw/hfi1/pin_system.c474
-rw-r--r--drivers/infiniband/hw/hfi1/pinning.h20
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c441
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h17
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c151
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c86
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c28
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c75
-rw-r--r--drivers/infiniband/hw/irdma/cm.c90
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c23
-rw-r--r--drivers/infiniband/hw/irdma/hw.c63
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.c1
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.h2
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.c1
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.h1
-rw-r--r--drivers/infiniband/hw/irdma/irdma.h1
-rw-r--r--drivers/infiniband/hw/irdma/main.h8
-rw-r--r--drivers/infiniband/hw/irdma/type.h3
-rw-r--r--drivers/infiniband/hw/irdma/uk.c218
-rw-r--r--drivers/infiniband/hw/irdma/user.h19
-rw-r--r--drivers/infiniband/hw/irdma/utils.c25
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c259
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h5
-rw-r--r--drivers/infiniband/hw/mlx4/main.c47
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c2
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c2
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c40
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c22
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c20
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c1
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c159
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c45
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c60
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c1
-rw-r--r--drivers/infiniband/sw/siw/siw.h4
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c17
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c62
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c52
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c12
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c19
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c15
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c20
-rw-r--r--drivers/input/gameport/Kconfig4
-rw-r--r--drivers/input/gameport/gameport.c26
-rw-r--r--drivers/input/joystick/xpad.c25
-rw-r--r--drivers/input/keyboard/adp5588-keys.c17
-rw-r--r--drivers/input/keyboard/amikbd.c25
-rw-r--r--drivers/input/keyboard/bcm-keypad.c24
-rw-r--r--drivers/input/keyboard/gpio_keys.c21
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c8
-rw-r--r--drivers/input/keyboard/lm8323.c95
-rw-r--r--drivers/input/keyboard/lm8333.c44
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c9
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c65
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c127
-rw-r--r--drivers/input/keyboard/nspire-keypad.c3
-rw-r--r--drivers/input/keyboard/omap4-keypad.c9
-rw-r--r--drivers/input/keyboard/opencores-kbd.c9
-rw-r--r--drivers/input/keyboard/pinephone-keyboard.c20
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c9
-rw-r--r--drivers/input/keyboard/qt1070.c46
-rw-r--r--drivers/input/keyboard/qt2160.c130
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c6
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c141
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/keyboard/tm2-touchkey.c1
-rw-r--r--drivers/input/misc/Kconfig4
-rw-r--r--drivers/input/misc/cpcap-pwrbutton.c12
-rw-r--r--drivers/input/misc/da9063_onkey.c9
-rw-r--r--drivers/input/misc/gpio-vibra.c22
-rw-r--r--drivers/input/misc/iqs269a.c2
-rw-r--r--drivers/input/misc/iqs626a.c2
-rw-r--r--drivers/input/misc/iqs7222.c478
-rw-r--r--drivers/input/misc/mma8450.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c1
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c1
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c1
-rw-r--r--drivers/input/misc/pwm-beeper.c19
-rw-r--r--drivers/input/misc/pwm-vibra.c32
-rw-r--r--drivers/input/misc/rotary_encoder.c9
-rw-r--r--drivers/input/misc/sparcspkr.c3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c9
-rw-r--r--drivers/input/mouse/psmouse-smbus.c19
-rw-r--r--drivers/input/serio/apbps2.c2
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h7
-rw-r--r--drivers/input/serio/i8042-sparcio.h4
-rw-r--r--drivers/input/serio/rpckbd.c8
-rw-r--r--drivers/input/serio/xilinx_ps2.c4
-rw-r--r--drivers/input/touchscreen/Kconfig14
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c72
-rw-r--r--drivers/input/touchscreen/bu21029_ts.c51
-rw-r--r--drivers/input/touchscreen/chipone_icn8318.c8
-rw-r--r--drivers/input/touchscreen/cy8ctma140.c8
-rw-r--r--drivers/input/touchscreen/cyttsp5.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c10
-rw-r--r--drivers/input/touchscreen/ektf2127.c8
-rw-r--r--drivers/input/touchscreen/elants_i2c.c22
-rw-r--r--drivers/input/touchscreen/exc3000.c17
-rw-r--r--drivers/input/touchscreen/goodix.c41
-rw-r--r--drivers/input/touchscreen/ili210x.c2
-rw-r--r--drivers/input/touchscreen/iqs5xx.c2
-rw-r--r--drivers/input/touchscreen/iqs7211.c2557
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c98
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c9
-rw-r--r--drivers/input/touchscreen/mms114.c89
-rw-r--r--drivers/input/touchscreen/novatek-nvt-ts.c10
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c40
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c30
-rw-r--r--drivers/input/touchscreen/resistive-adc-touch.c8
-rw-r--r--drivers/input/touchscreen/silead.c8
-rw-r--r--drivers/input/touchscreen/sis_i2c.c20
-rw-r--r--drivers/input/touchscreen/surface3_spi.c13
-rw-r--r--drivers/input/touchscreen/sx8654.c10
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c1
-rw-r--r--drivers/iommu/amd/amd_iommu.h7
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h22
-rw-r--r--drivers/iommu/amd/init.c131
-rw-r--r--drivers/iommu/amd/iommu.c86
-rw-r--r--drivers/iommu/amd/iommu_v2.c7
-rw-r--r--drivers/iommu/apple-dart.c5
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c20
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c60
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c7
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c1
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c71
-rw-r--r--drivers/iommu/dma-iommu.c26
-rw-r--r--drivers/iommu/dma-iommu.h8
-rw-r--r--drivers/iommu/intel/iommu.c260
-rw-r--r--drivers/iommu/intel/iommu.h11
-rw-r--r--drivers/iommu/intel/pasid.c4
-rw-r--r--drivers/iommu/intel/pasid.h2
-rw-r--r--drivers/iommu/intel/svm.c62
-rw-r--r--drivers/iommu/iommu-sva.c29
-rw-r--r--drivers/iommu/iommu-sysfs.c8
-rw-r--r--drivers/iommu/iommu.c483
-rw-r--r--drivers/iommu/ipmmu-vmsa.c21
-rw-r--r--drivers/iommu/mtk_iommu.c150
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/rockchip-iommu.c50
-rw-r--r--drivers/iommu/sprd-iommu.c2
-rw-r--r--drivers/iommu/tegra-smmu.c2
-rw-r--r--drivers/iommu/virtio-iommu.c2
-rw-r--r--drivers/irqchip/irq-gic-common.h4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c170
-rw-r--r--drivers/irqchip/irq-gic-v3.c13
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c4
-rw-r--r--drivers/irqchip/irq-riscv-intc.c10
-rw-r--r--drivers/irqchip/irq-stm32-exti.c1
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c1
-rw-r--r--drivers/irqchip/qcom-pdc.c69
-rw-r--r--drivers/leds/Kconfig9
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/blink/Kconfig4
-rw-r--r--drivers/leds/flash/Kconfig2
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c5
-rw-r--r--drivers/leds/led-class-multicolor.c8
-rw-r--r--drivers/leds/led-class.c76
-rw-r--r--drivers/leds/led-core.c4
-rw-r--r--drivers/leds/leds-an30259a.c2
-rw-r--r--drivers/leds/leds-ariel.c2
-rw-r--r--drivers/leds/leds-aw200xx.c4
-rw-r--r--drivers/leds/leds-aw2013.c38
-rw-r--r--drivers/leds/leds-cpcap.c2
-rw-r--r--drivers/leds/leds-cr0014114.c2
-rw-r--r--drivers/leds/leds-ip30.c8
-rw-r--r--drivers/leds/leds-is31fl32xx.c1
-rw-r--r--drivers/leds/leds-lp5521.c5
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/leds/leds-lp5562.c4
-rw-r--r--drivers/leds/leds-lp8501.c4
-rw-r--r--drivers/leds/leds-mlxreg.c1
-rw-r--r--drivers/leds/leds-ns2.c2
-rw-r--r--drivers/leds/leds-pca9532.c1
-rw-r--r--drivers/leds/leds-pca995x.c204
-rw-r--r--drivers/leds/leds-pm8058.c1
-rw-r--r--drivers/leds/leds-pwm.c4
-rw-r--r--drivers/leds/leds-spi-byte.c2
-rw-r--r--drivers/leds/leds-syscon.c3
-rw-r--r--drivers/leds/leds-ti-lmu-common.c2
-rw-r--r--drivers/leds/leds-tlc591xx.c1
-rw-r--r--drivers/leds/leds-turris-omnia.c13
-rw-r--r--drivers/leds/rgb/Kconfig12
-rw-r--r--drivers/leds/rgb/Makefile1
-rw-r--r--drivers/leds/rgb/leds-group-multicolor.c169
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c2
-rw-r--r--drivers/leds/simple/Kconfig14
-rw-r--r--drivers/leds/simple/Makefile1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-core.c4
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c57
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.h2
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c13
-rw-r--r--drivers/leds/trigger/ledtrig-tty.c12
-rw-r--r--drivers/leds/uleds.c12
-rw-r--r--drivers/mailbox/arm_mhu.c1
-rw-r--r--drivers/mailbox/arm_mhu_db.c1
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c10
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c18
-rw-r--r--drivers/mailbox/hi3660-mailbox.c1
-rw-r--r--drivers/mailbox/hi6220-mailbox.c1
-rw-r--r--drivers/mailbox/imx-mailbox.c3
-rw-r--r--drivers/mailbox/mailbox-mpfs.c1
-rw-r--r--drivers/mailbox/mailbox-test.c8
-rw-r--r--drivers/mailbox/mailbox.c1
-rw-r--r--drivers/mailbox/mtk-adsp-mailbox.c3
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c2
-rw-r--r--drivers/mailbox/omap-mailbox.c2
-rw-r--r--drivers/mailbox/platform_mhu.c5
-rw-r--r--drivers/mailbox/qcom-ipcc.c4
-rw-r--r--drivers/mailbox/rockchip-mailbox.c8
-rw-r--r--drivers/mailbox/sprd-mailbox.c2
-rw-r--r--drivers/mailbox/stm32-ipcc.c1
-rw-r--r--drivers/mailbox/tegra-hsp.c5
-rw-r--r--drivers/mailbox/ti-msgmgr.c16
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c2
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-crypt.c3
-rw-r--r--drivers/md/dm-ioctl.c7
-rw-r--r--drivers/md/dm-table.c32
-rw-r--r--drivers/md/dm-zoned-target.c15
-rw-r--r--drivers/md/dm.c23
-rw-r--r--drivers/md/md.c23
-rw-r--r--drivers/md/md.h3
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid5.c7
-rw-r--r--drivers/media/common/videobuf2/frame_vector.c6
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c2
-rw-r--r--drivers/media/dvb-frontends/atbm8830.c2
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c2
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c2
-rw-r--r--drivers/media/dvb-frontends/cx22700.c2
-rw-r--r--drivers/media/dvb-frontends/cx22702.c2
-rw-r--r--drivers/media/dvb-frontends/cx24110.c2
-rw-r--r--drivers/media/dvb-frontends/cx24113.c2
-rw-r--r--drivers/media/dvb-frontends/cx24116.c2
-rw-r--r--drivers/media/dvb-frontends/cx24120.c2
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2880/cxd2880_top.c2
-rw-r--r--drivers/media/dvb-frontends/dib0070.c2
-rw-r--r--drivers/media/dvb-frontends/dib0090.c4
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c2
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb-frontends/dib8000.c2
-rw-r--r--drivers/media/dvb-frontends/dib9000.c2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c2
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c2
-rw-r--r--drivers/media/dvb-frontends/ds3000.c2
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c2
-rw-r--r--drivers/media/dvb-frontends/ec100.c2
-rw-r--r--drivers/media/dvb-frontends/helene.c4
-rw-r--r--drivers/media/dvb-frontends/horus3a.c2
-rw-r--r--drivers/media/dvb-frontends/isl6405.c2
-rw-r--r--drivers/media/dvb-frontends/isl6421.c2
-rw-r--r--drivers/media/dvb-frontends/isl6423.c2
-rw-r--r--drivers/media/dvb-frontends/itd1000.c2
-rw-r--r--drivers/media/dvb-frontends/ix2505v.c2
-rw-r--r--drivers/media/dvb-frontends/l64781.c2
-rw-r--r--drivers/media/dvb-frontends/lg2160.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c2
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c2
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.c2
-rw-r--r--drivers/media/dvb-frontends/lnbh25.c2
-rw-r--r--drivers/media/dvb-frontends/lnbp21.c4
-rw-r--r--drivers/media/dvb-frontends/lnbp22.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c2
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c2
-rw-r--r--drivers/media/dvb-frontends/mt312.c2
-rw-r--r--drivers/media/dvb-frontends/mt352.c2
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c2
-rw-r--r--drivers/media/dvb-frontends/nxt6000.c2
-rw-r--r--drivers/media/dvb-frontends/or51132.c2
-rw-r--r--drivers/media/dvb-frontends/or51211.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c2
-rw-r--r--drivers/media/dvb-frontends/s5h1432.c2
-rw-r--r--drivers/media/dvb-frontends/s921.c2
-rw-r--r--drivers/media/dvb-frontends/si21xx.c2
-rw-r--r--drivers/media/dvb-frontends/sp887x.c2
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c2
-rw-r--r--drivers/media/dvb-frontends/stb6000.c2
-rw-r--r--drivers/media/dvb-frontends/stb6100.c2
-rw-r--r--drivers/media/dvb-frontends/stv0288.c2
-rw-r--r--drivers/media/dvb-frontends/stv0297.c2
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367.c6
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c2
-rw-r--r--drivers/media/dvb-frontends/stv090x.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c2
-rw-r--r--drivers/media/dvb-frontends/tda10021.c2
-rw-r--r--drivers/media/dvb-frontends/tda10023.c2
-rw-r--r--drivers/media/dvb-frontends/tda10048.c2
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c4
-rw-r--r--drivers/media/dvb-frontends/tda10086.c2
-rw-r--r--drivers/media/dvb-frontends/tda665x.c2
-rw-r--r--drivers/media/dvb-frontends/tda8083.c2
-rw-r--r--drivers/media/dvb-frontends/tda8261.c2
-rw-r--r--drivers/media/dvb-frontends/tda826x.c2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c2
-rw-r--r--drivers/media/dvb-frontends/tua6100.c2
-rw-r--r--drivers/media/dvb-frontends/ves1820.c2
-rw-r--r--drivers/media/dvb-frontends/ves1x93.c2
-rw-r--r--drivers/media/dvb-frontends/zl10036.c2
-rw-r--r--drivers/media/dvb-frontends/zl10039.c2
-rw-r--r--drivers/media/dvb-frontends/zl10353.c2
-rw-r--r--drivers/media/i2c/imx219.c13
-rw-r--r--drivers/media/i2c/max9286.c1
-rw-r--r--drivers/media/i2c/rdacm21.c1
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c4
-rw-r--r--drivers/media/pci/bt8xx/dst.c2
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-dummy-fe.c2
-rw-r--r--drivers/media/pci/intel/Kconfig20
-rw-r--r--drivers/media/pci/intel/ipu3/Kconfig21
-rw-r--r--drivers/media/pci/intel/ivsc/Kconfig5
-rw-r--r--drivers/media/platform/intel/pxa_camera.c2
-rw-r--r--drivers/media/platform/marvell/Kconfig4
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c3
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c2
-rw-r--r--drivers/media/platform/via/Kconfig2
-rw-r--r--drivers/media/tuners/fc0011.c2
-rw-r--r--drivers/media/tuners/fc0012.c2
-rw-r--r--drivers/media/tuners/fc0013.c2
-rw-r--r--drivers/media/tuners/max2165.c2
-rw-r--r--drivers/media/tuners/mc44s803.c2
-rw-r--r--drivers/media/tuners/mt2060.c2
-rw-r--r--drivers/media/tuners/mt2131.c2
-rw-r--r--drivers/media/tuners/mt2266.c2
-rw-r--r--drivers/media/tuners/mxl5005s.c2
-rw-r--r--drivers/media/tuners/qt1010.c2
-rw-r--r--drivers/media/tuners/tda18218.c2
-rw-r--r--drivers/media/tuners/xc2028.c2
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/tuners/xc5000.c2
-rw-r--r--drivers/media/usb/em28xx/Kconfig4
-rw-r--r--drivers/media/usb/go7007/Kconfig2
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c3
-rw-r--r--drivers/mfd/Kconfig6
-rw-r--r--drivers/mfd/ab8500-core.c1
-rw-r--r--drivers/mfd/acer-ec-a500.c2
-rw-r--r--drivers/mfd/act8945a.c4
-rw-r--r--drivers/mfd/altera-a10sr.c2
-rw-r--r--drivers/mfd/altera-sysmgr.c3
-rw-r--r--drivers/mfd/arizona-core.c1
-rw-r--r--drivers/mfd/atc260x-core.c1
-rw-r--r--drivers/mfd/atmel-hlcdc.c4
-rw-r--r--drivers/mfd/axp20x.c12
-rw-r--r--drivers/mfd/bcm590xx.c1
-rw-r--r--drivers/mfd/cros_ec_dev.c2
-rw-r--r--drivers/mfd/cs42l43.c4
-rw-r--r--drivers/mfd/cs47l15-tables.c8
-rw-r--r--drivers/mfd/cs47l24-tables.c2
-rw-r--r--drivers/mfd/cs47l35-tables.c8
-rw-r--r--drivers/mfd/cs47l85-tables.c8
-rw-r--r--drivers/mfd/cs47l90-tables.c8
-rw-r--r--drivers/mfd/cs47l92-tables.c8
-rw-r--r--drivers/mfd/da9052-i2c.c5
-rw-r--r--drivers/mfd/da9055-i2c.c1
-rw-r--r--drivers/mfd/da9062-core.c2
-rw-r--r--drivers/mfd/exynos-lpass.c4
-rw-r--r--drivers/mfd/hi6421-pmic-core.c2
-rw-r--r--drivers/mfd/hi655x-pmic.c7
-rw-r--r--drivers/mfd/ipaq-micro.c7
-rw-r--r--drivers/mfd/iqs62x.c2
-rw-r--r--drivers/mfd/lochnagar-i2c.c2
-rw-r--r--drivers/mfd/lp873x.c2
-rw-r--r--drivers/mfd/lp87565.c2
-rw-r--r--drivers/mfd/madera-i2c.c1
-rw-r--r--drivers/mfd/madera-spi.c1
-rw-r--r--drivers/mfd/max14577.c3
-rw-r--r--drivers/mfd/max77541.c2
-rw-r--r--drivers/mfd/max77620.c1
-rw-r--r--drivers/mfd/max77686.c1
-rw-r--r--drivers/mfd/max77843.c2
-rw-r--r--drivers/mfd/max8907.c1
-rw-r--r--drivers/mfd/max8925-core.c1
-rw-r--r--drivers/mfd/max8997.c1
-rw-r--r--drivers/mfd/max8998.c1
-rw-r--r--drivers/mfd/mc13xxx-i2c.c9
-rw-r--r--drivers/mfd/mt6358-irq.c5
-rw-r--r--drivers/mfd/mt6397-core.c5
-rw-r--r--drivers/mfd/mt6397-irq.c5
-rw-r--r--drivers/mfd/mxs-lradc.c2
-rw-r--r--drivers/mfd/omap-usb-host.c4
-rw-r--r--drivers/mfd/omap-usb-tll.c4
-rw-r--r--drivers/mfd/palmas.c3
-rw-r--r--drivers/mfd/qcom-pm8008.c2
-rw-r--r--drivers/mfd/qcom-pm8xxx.c10
-rw-r--r--drivers/mfd/rave-sp.c2
-rw-r--r--drivers/mfd/rk8xx-core.c2
-rw-r--r--drivers/mfd/rn5t618.c2
-rw-r--r--drivers/mfd/rohm-bd71828.c2
-rw-r--r--drivers/mfd/rohm-bd718x7.c2
-rw-r--r--drivers/mfd/rohm-bd9576.c2
-rw-r--r--drivers/mfd/rsmu_i2c.c2
-rw-r--r--drivers/mfd/rsmu_spi.c2
-rw-r--r--drivers/mfd/rt5033.c2
-rw-r--r--drivers/mfd/rz-mtu3.c34
-rw-r--r--drivers/mfd/sec-core.c2
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c2
-rw-r--r--drivers/mfd/ssbi.c6
-rw-r--r--drivers/mfd/stm32-lptimer.c1
-rw-r--r--drivers/mfd/stm32-timers.c4
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stpmic1.c2
-rw-r--r--drivers/mfd/sun4i-gpadc.c4
-rw-r--r--drivers/mfd/tc3589x.c4
-rw-r--r--drivers/mfd/ti-lmu.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c5
-rw-r--r--drivers/mfd/tps6507x.c1
-rw-r--r--drivers/mfd/tps65090.c1
-rw-r--r--drivers/mfd/tps65217.c1
-rw-r--r--drivers/mfd/tps65218.c1
-rw-r--r--drivers/mfd/tps6594-core.c2
-rw-r--r--drivers/mfd/twl6040.c2
-rw-r--r--drivers/mfd/wm5102-tables.c2
-rw-r--r--drivers/mfd/wm5110-tables.c2
-rw-r--r--drivers/mfd/wm831x-core.c3
-rw-r--r--drivers/mfd/wm831x-i2c.c2
-rw-r--r--drivers/mfd/wm831x-spi.c2
-rw-r--r--drivers/mfd/wm8994-core.c2
-rw-r--r--drivers/mfd/wm8994-regmap.c6
-rw-r--r--drivers/mfd/wm8997-tables.c2
-rw-r--r--drivers/mfd/wm8998-tables.c2
-rw-r--r--drivers/misc/cardreader/rts5227.c55
-rw-r--r--drivers/misc/cardreader/rts5228.c57
-rw-r--r--drivers/misc/cardreader/rts5249.c56
-rw-r--r--drivers/misc/cardreader/rts5260.c43
-rw-r--r--drivers/misc/cardreader/rts5261.c52
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c51
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c3
-rw-r--r--drivers/mtd/devices/docg3.c8
-rw-r--r--drivers/mtd/devices/mchp23k256.c2
-rw-r--r--drivers/mtd/devices/mchp48l640.c2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/spear_smi.c20
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c14
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c4
-rw-r--r--drivers/mtd/maps/lantiq-flash.c11
-rw-r--r--drivers/mtd/maps/physmap-bt1-rom.c1
-rw-r--r--drivers/mtd/maps/physmap-core.c3
-rw-r--r--drivers/mtd/maps/physmap-gemini.c2
-rw-r--r--drivers/mtd/maps/physmap-ixp4xx.c2
-rw-r--r--drivers/mtd/maps/physmap-ixp4xx.h1
-rw-r--r--drivers/mtd/maps/physmap-versatile.c4
-rw-r--r--drivers/mtd/maps/plat-ram.c3
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/mtdblock.c2
-rw-r--r--drivers/mtd/mtdblock_ro.c2
-rw-r--r--drivers/mtd/mtdcore.c97
-rw-r--r--drivers/mtd/mtdcore.h1
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/nand/ecc-mxic.c2
-rw-r--r--drivers/mtd/nand/ecc.c2
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c12
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c9
-rw-r--r--drivers/mtd/nand/raw/Kconfig9
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c2
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c29
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c3
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c4
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c130
-rw-r--r--drivers/mtd/nand/raw/brcmnand/iproc_nand.c7
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c1
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c1
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c1
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c6
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c19
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c1
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c1
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c15
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c3
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c15
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c19
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c72
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c15
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c63
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c10
-rw-r--r--drivers/mtd/nand/raw/nand_base.c1
-rw-r--r--drivers/mtd/nand/raw/ndfc.c3
-rw-r--r--drivers/mtd/nand/raw/omap2.c5
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c22
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c209
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c1003
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c1
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c1
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c4
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c3
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c21
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c33
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c35
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c3
-rw-r--r--drivers/mtd/nand/spi/esmt.c9
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c20
-rw-r--r--drivers/mtd/nand/spi/toshiba.c33
-rw-r--r--drivers/mtd/spi-nor/atmel.c8
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c34
-rw-r--r--drivers/mtd/spi-nor/core.c57
-rw-r--r--drivers/mtd/spi-nor/core.h9
-rw-r--r--drivers/mtd/spi-nor/debugfs.c1
-rw-r--r--drivers/mtd/spi-nor/issi.c4
-rw-r--r--drivers/mtd/spi-nor/macronix.c4
-rw-r--r--drivers/mtd/spi-nor/micron-st.c8
-rw-r--r--drivers/mtd/spi-nor/spansion.c318
-rw-r--r--drivers/mtd/spi-nor/sst.c12
-rw-r--r--drivers/mtd/spi-nor/swp.c9
-rw-r--r--drivers/mtd/spi-nor/winbond.c9
-rw-r--r--drivers/mtd/spi-nor/xilinx.c4
-rw-r--r--drivers/mtd/ubi/build.c7
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c16
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c31
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h1
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h4
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c93
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c120
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c4
-rw-r--r--drivers/net/ethernet/adi/adin1110.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c3
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c10
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h4
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h54
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c5
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c6
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c18
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c12
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c10
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c31
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c28
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c5
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c8
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c8
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c43
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c49
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h4
-rw-r--r--drivers/net/ethernet/marvell/sky2.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c12
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c49
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c18
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c59
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c211
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h2
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c33
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h4
-rw-r--r--drivers/net/ethernet/sfc/rx.c20
-rw-r--r--drivers/net/ethernet/sfc/tc.c21
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c7
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.c2
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c165
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c3
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c6
-rw-r--r--drivers/net/macsec.c3
-rw-r--r--drivers/net/phy/micrel.c9
-rw-r--r--drivers/net/team/team.c10
-rw-r--r--drivers/net/thunderbolt/main.c3
-rw-r--r--drivers/net/usb/r8152.c3
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/veth.c6
-rw-r--r--drivers/net/virtio_net.c228
-rw-r--r--drivers/net/vxlan/vxlan_core.c4
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h1
-rw-r--r--drivers/nfc/nxp-nci/i2c.c1
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c11
-rw-r--r--drivers/ntb/ntb_transport.c21
-rw-r--r--drivers/ntb/test/ntb_perf.c2
-rw-r--r--drivers/ntb/test/ntb_tool.c15
-rw-r--r--drivers/nvme/host/core.c54
-rw-r--r--drivers/nvme/host/fc.c2
-rw-r--r--drivers/nvme/host/hwmon.c2
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/target/tcp.c2
-rw-r--r--drivers/of/dynamic.c6
-rw-r--r--drivers/of/overlay.c2
-rw-r--r--drivers/parisc/ccio-dma.c18
-rw-r--r--drivers/parisc/iommu-helpers.h8
-rw-r--r--drivers/parisc/iosapic.c4
-rw-r--r--drivers/parisc/iosapic_private.h4
-rw-r--r--drivers/parisc/sba_iommu.c38
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c4
-rw-r--r--drivers/pci/of.c19
-rw-r--r--drivers/pci/of_property.c25
-rw-r--r--drivers/pci/pci-driver.c14
-rw-r--r--drivers/pci/pcie/aer.c1
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/probe.c1
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/perf/arm-cmn.c2
-rw-r--r--drivers/perf/arm_pmuv3.c2
-rw-r--r--drivers/perf/cxl_pmu.c2
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c2
-rw-r--r--drivers/phy/allwinner/phy-sun50i-usb3.c1
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c2
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c1
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-pcie.c1
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c1
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c4
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c2
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb2.c2
-rw-r--r--drivers/phy/amlogic/phy-meson8-hdmi-tx.c2
-rw-r--r--drivers/phy/amlogic/phy-meson8b-usb2.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb3.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-usb.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm63xx-usbh.c1
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c1
-rw-r--r--drivers/phy/cadence/cdns-dphy-rx.c1
-rw-r--r--drivers/phy/cadence/cdns-dphy.c3
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c98
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c1734
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c2
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c4
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c1
-rw-r--r--drivers/phy/hisilicon/phy-hi3660-usb3.c1
-rw-r--r--drivers/phy/hisilicon/phy-hi3670-usb3.c1
-rw-r--r--drivers/phy/hisilicon/phy-hi6220-usb.c1
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c3
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c3
-rw-r--r--drivers/phy/hisilicon/phy-hix5hd2-sata.c1
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c1
-rw-r--r--drivers/phy/lantiq/phy-lantiq-rcu-usb2.c1
-rw-r--r--drivers/phy/marvell/phy-armada38x-comphy.c1
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c1
-rw-r--r--drivers/phy/marvell/phy-mmp3-hsic.c1
-rw-r--r--drivers/phy/marvell/phy-mmp3-usb.c1
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c1
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c4
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-utmi.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-sata.c1
-rw-r--r--drivers/phy/marvell/phy-pxa-28nm-usb2.c1
-rw-r--r--drivers/phy/marvell/phy-pxa-usb.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.h1
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c30
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.h2
-rw-r--r--drivers/phy/mediatek/phy-mtk-pcie.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-ufs.c1
-rw-r--r--drivers/phy/phy-can-transceiver.c1
-rw-r--r--drivers/phy/phy-xgene.c1
-rw-r--r--drivers/phy/qualcomm/Kconfig21
-rw-r--r--drivers/phy/qualcomm/Makefile2
-rw-r--r--drivers/phy/qualcomm/phy-ath79-usb.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c294
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c492
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c483
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c1407
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c557
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-eusb2.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c2
-rw-r--r--drivers/phy/ralink/phy-mt7621-pci.c3
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-pcie.c1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c2
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c10
-rw-r--r--drivers/phy/rockchip/phy-rockchip-dphy-rx0.c1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c5
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c235
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c357
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-snps-pcie3.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c3
-rw-r--r--drivers/phy/samsung/phy-exynos-dp-video.c2
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c3
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c184
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c2
-rw-r--r--drivers/phy/socionext/phy-uniphier-pcie.c2
-rw-r--r--drivers/phy/st/phy-spear1310-miphy.c3
-rw-r--r--drivers/phy/st/phy-spear1340-miphy.c3
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c3
-rw-r--r--drivers/phy/starfive/Kconfig38
-rw-r--r--drivers/phy/starfive/Makefile4
-rw-r--r--drivers/phy/starfive/phy-jh7110-dphy-rx.c232
-rw-r--r--drivers/phy/starfive/phy-jh7110-pcie.c204
-rw-r--r--drivers/phy/starfive/phy-jh7110-usb.c152
-rw-r--r--drivers/phy/sunplus/phy-sunplus-usb2.c2
-rw-r--r--drivers/phy/tegra/phy-tegra194-p2u.c2
-rw-r--r--drivers/phy/tegra/xusb.c2
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c9
-rw-r--r--drivers/phy/ti/phy-tusb1210.c1
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c1
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c95
-rw-r--r--drivers/platform/mellanox/Kconfig5
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c41
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c104
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c14
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c8
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c7
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c66
-rw-r--r--drivers/platform/x86/think-lmi.c24
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c45
-rw-r--r--drivers/pmdomain/Makefile (renamed from drivers/genpd/Makefile)0
-rw-r--r--drivers/pmdomain/actions/Makefile (renamed from drivers/genpd/actions/Makefile)0
-rw-r--r--drivers/pmdomain/actions/owl-sps-helper.c (renamed from drivers/genpd/actions/owl-sps-helper.c)0
-rw-r--r--drivers/pmdomain/actions/owl-sps.c (renamed from drivers/genpd/actions/owl-sps.c)0
-rw-r--r--drivers/pmdomain/amlogic/Makefile (renamed from drivers/genpd/amlogic/Makefile)0
-rw-r--r--drivers/pmdomain/amlogic/meson-ee-pwrc.c (renamed from drivers/genpd/amlogic/meson-ee-pwrc.c)0
-rw-r--r--drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c (renamed from drivers/genpd/amlogic/meson-gx-pwrc-vpu.c)0
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c (renamed from drivers/genpd/amlogic/meson-secure-pwrc.c)0
-rw-r--r--drivers/pmdomain/apple/Makefile (renamed from drivers/genpd/apple/Makefile)0
-rw-r--r--drivers/pmdomain/apple/pmgr-pwrstate.c (renamed from drivers/genpd/apple/pmgr-pwrstate.c)0
-rw-r--r--drivers/pmdomain/bcm/Makefile (renamed from drivers/genpd/bcm/Makefile)0
-rw-r--r--drivers/pmdomain/bcm/bcm-pmb.c (renamed from drivers/genpd/bcm/bcm-pmb.c)0
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c (renamed from drivers/genpd/bcm/bcm2835-power.c)0
-rw-r--r--drivers/pmdomain/bcm/bcm63xx-power.c (renamed from drivers/genpd/bcm/bcm63xx-power.c)0
-rw-r--r--drivers/pmdomain/bcm/raspberrypi-power.c (renamed from drivers/genpd/bcm/raspberrypi-power.c)0
-rw-r--r--drivers/pmdomain/imx/Makefile (renamed from drivers/genpd/imx/Makefile)0
-rw-r--r--drivers/pmdomain/imx/gpc.c (renamed from drivers/genpd/imx/gpc.c)0
-rw-r--r--drivers/pmdomain/imx/gpcv2.c (renamed from drivers/genpd/imx/gpcv2.c)0
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c (renamed from drivers/genpd/imx/imx8m-blk-ctrl.c)0
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c (renamed from drivers/genpd/imx/imx8mp-blk-ctrl.c)0
-rw-r--r--drivers/pmdomain/imx/imx93-blk-ctrl.c (renamed from drivers/genpd/imx/imx93-blk-ctrl.c)0
-rw-r--r--drivers/pmdomain/imx/imx93-pd.c (renamed from drivers/genpd/imx/imx93-pd.c)0
-rw-r--r--drivers/pmdomain/imx/scu-pd.c (renamed from drivers/genpd/imx/scu-pd.c)0
-rw-r--r--drivers/pmdomain/mediatek/Makefile (renamed from drivers/genpd/mediatek/Makefile)0
-rw-r--r--drivers/pmdomain/mediatek/mt6795-pm-domains.h (renamed from drivers/genpd/mediatek/mt6795-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8167-pm-domains.h (renamed from drivers/genpd/mediatek/mt8167-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8173-pm-domains.h (renamed from drivers/genpd/mediatek/mt8173-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8183-pm-domains.h (renamed from drivers/genpd/mediatek/mt8183-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8186-pm-domains.h (renamed from drivers/genpd/mediatek/mt8186-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8188-pm-domains.h (renamed from drivers/genpd/mediatek/mt8188-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8192-pm-domains.h (renamed from drivers/genpd/mediatek/mt8192-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mt8195-pm-domains.h (renamed from drivers/genpd/mediatek/mt8195-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c (renamed from drivers/genpd/mediatek/mtk-pm-domains.c)0
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h (renamed from drivers/genpd/mediatek/mtk-pm-domains.h)0
-rw-r--r--drivers/pmdomain/mediatek/mtk-scpsys.c (renamed from drivers/genpd/mediatek/mtk-scpsys.c)0
-rw-r--r--drivers/pmdomain/qcom/Makefile (renamed from drivers/genpd/qcom/Makefile)0
-rw-r--r--drivers/pmdomain/qcom/cpr.c (renamed from drivers/genpd/qcom/cpr.c)0
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c (renamed from drivers/genpd/qcom/rpmhpd.c)0
-rw-r--r--drivers/pmdomain/qcom/rpmpd.c (renamed from drivers/genpd/qcom/rpmpd.c)0
-rw-r--r--drivers/pmdomain/renesas/Makefile (renamed from drivers/genpd/renesas/Makefile)0
-rw-r--r--drivers/pmdomain/renesas/r8a7742-sysc.c (renamed from drivers/genpd/renesas/r8a7742-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7743-sysc.c (renamed from drivers/genpd/renesas/r8a7743-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7745-sysc.c (renamed from drivers/genpd/renesas/r8a7745-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77470-sysc.c (renamed from drivers/genpd/renesas/r8a77470-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a774a1-sysc.c (renamed from drivers/genpd/renesas/r8a774a1-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a774b1-sysc.c (renamed from drivers/genpd/renesas/r8a774b1-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a774c0-sysc.c (renamed from drivers/genpd/renesas/r8a774c0-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a774e1-sysc.c (renamed from drivers/genpd/renesas/r8a774e1-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7779-sysc.c (renamed from drivers/genpd/renesas/r8a7779-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7790-sysc.c (renamed from drivers/genpd/renesas/r8a7790-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7791-sysc.c (renamed from drivers/genpd/renesas/r8a7791-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7792-sysc.c (renamed from drivers/genpd/renesas/r8a7792-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7794-sysc.c (renamed from drivers/genpd/renesas/r8a7794-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7795-sysc.c (renamed from drivers/genpd/renesas/r8a7795-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a7796-sysc.c (renamed from drivers/genpd/renesas/r8a7796-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77965-sysc.c (renamed from drivers/genpd/renesas/r8a77965-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77970-sysc.c (renamed from drivers/genpd/renesas/r8a77970-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77980-sysc.c (renamed from drivers/genpd/renesas/r8a77980-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77990-sysc.c (renamed from drivers/genpd/renesas/r8a77990-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a77995-sysc.c (renamed from drivers/genpd/renesas/r8a77995-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a779a0-sysc.c (renamed from drivers/genpd/renesas/r8a779a0-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a779f0-sysc.c (renamed from drivers/genpd/renesas/r8a779f0-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/r8a779g0-sysc.c (renamed from drivers/genpd/renesas/r8a779g0-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.c (renamed from drivers/genpd/renesas/rcar-gen4-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.h (renamed from drivers/genpd/renesas/rcar-gen4-sysc.h)0
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c (renamed from drivers/genpd/renesas/rcar-sysc.c)0
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.h (renamed from drivers/genpd/renesas/rcar-sysc.h)0
-rw-r--r--drivers/pmdomain/renesas/rmobile-sysc.c (renamed from drivers/genpd/renesas/rmobile-sysc.c)0
-rw-r--r--drivers/pmdomain/rockchip/Makefile (renamed from drivers/genpd/rockchip/Makefile)0
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c (renamed from drivers/genpd/rockchip/pm-domains.c)0
-rw-r--r--drivers/pmdomain/samsung/Makefile (renamed from drivers/genpd/samsung/Makefile)0
-rw-r--r--drivers/pmdomain/samsung/exynos-pm-domains.c (renamed from drivers/genpd/samsung/exynos-pm-domains.c)0
-rw-r--r--drivers/pmdomain/st/Makefile (renamed from drivers/genpd/st/Makefile)0
-rw-r--r--drivers/pmdomain/st/ste-ux500-pm-domain.c (renamed from drivers/genpd/st/ste-ux500-pm-domain.c)0
-rw-r--r--drivers/pmdomain/starfive/Makefile (renamed from drivers/genpd/starfive/Makefile)0
-rw-r--r--drivers/pmdomain/starfive/jh71xx-pmu.c (renamed from drivers/genpd/starfive/jh71xx-pmu.c)0
-rw-r--r--drivers/pmdomain/sunxi/Makefile (renamed from drivers/genpd/sunxi/Makefile)0
-rw-r--r--drivers/pmdomain/sunxi/sun20i-ppu.c (renamed from drivers/genpd/sunxi/sun20i-ppu.c)0
-rw-r--r--drivers/pmdomain/tegra/Makefile (renamed from drivers/genpd/tegra/Makefile)0
-rw-r--r--drivers/pmdomain/tegra/powergate-bpmp.c (renamed from drivers/genpd/tegra/powergate-bpmp.c)0
-rw-r--r--drivers/pmdomain/ti/Makefile (renamed from drivers/genpd/ti/Makefile)0
-rw-r--r--drivers/pmdomain/ti/omap_prm.c (renamed from drivers/genpd/ti/omap_prm.c)0
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c (renamed from drivers/genpd/ti/ti_sci_pm_domains.c)0
-rw-r--r--drivers/pmdomain/xilinx/Makefile (renamed from drivers/genpd/xilinx/Makefile)0
-rw-r--r--drivers/pmdomain/xilinx/zynqmp-pm-domains.c (renamed from drivers/genpd/xilinx/zynqmp-pm-domains.c)0
-rw-r--r--drivers/power/reset/Kconfig2
-rw-r--r--drivers/power/reset/pwr-mlxbf.c2
-rw-r--r--drivers/power/reset/vexpress-poweroff.c2
-rw-r--r--drivers/power/supply/Kconfig1
-rw-r--r--drivers/power/supply/ab8500_btemp.c9
-rw-r--r--drivers/power/supply/ab8500_chargalg.c2
-rw-r--r--drivers/power/supply/mt6370-charger.c2
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/power/supply/power_supply_sysfs.c7
-rw-r--r--drivers/power/supply/rk817_charger.c16
-rw-r--r--drivers/power/supply/rt9467-charger.c4
-rw-r--r--drivers/power/supply/ucs1002_power.c3
-rw-r--r--drivers/powercap/intel_rapl_common.c4
-rw-r--r--drivers/ptp/ptp_ocp.c1
-rw-r--r--drivers/pwm/Kconfig2
-rw-r--r--drivers/pwm/core.c41
-rw-r--r--drivers/pwm/pwm-apple.c1
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c66
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c123
-rw-r--r--drivers/pwm/pwm-atmel.c77
-rw-r--r--drivers/pwm/pwm-bcm-kona.c4
-rw-r--r--drivers/pwm/pwm-berlin.c1
-rw-r--r--drivers/pwm/pwm-crc.c4
-rw-r--r--drivers/pwm/pwm-cros-ec.c11
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c3
-rw-r--r--drivers/pwm/pwm-hibvt.c2
-rw-r--r--drivers/pwm/pwm-imx1.c1
-rw-r--r--drivers/pwm/pwm-jz4740.c2
-rw-r--r--drivers/pwm/pwm-lp3943.c5
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c34
-rw-r--r--drivers/pwm/pwm-lpc32xx.c16
-rw-r--r--drivers/pwm/pwm-mediatek.c1
-rw-r--r--drivers/pwm/pwm-meson.c1
-rw-r--r--drivers/pwm/pwm-microchip-core.c2
-rw-r--r--drivers/pwm/pwm-mtk-disp.c1
-rw-r--r--drivers/pwm/pwm-ntxec.c4
-rw-r--r--drivers/pwm/pwm-pxa.c10
-rw-r--r--drivers/pwm/pwm-rockchip.c4
-rw-r--r--drivers/pwm/pwm-rz-mtu3.c2
-rw-r--r--drivers/pwm/pwm-sifive.c5
-rw-r--r--drivers/pwm/pwm-sl28cpld.c14
-rw-r--r--drivers/pwm/pwm-sprd.c1
-rw-r--r--drivers/pwm/pwm-stm32.c14
-rw-r--r--drivers/pwm/pwm-stmpe.c17
-rw-r--r--drivers/pwm/pwm-sun4i.c1
-rw-r--r--drivers/pwm/pwm-sunplus.c1
-rw-r--r--drivers/pwm/pwm-tegra.c1
-rw-r--r--drivers/pwm/pwm-tiecap.c2
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/pwm/pwm-visconti.c2
-rw-r--r--drivers/pwm/pwm-vt8500.c5
-rw-r--r--drivers/regulator/core.c10
-rw-r--r--drivers/regulator/helpers.c2
-rw-r--r--drivers/regulator/mt6358-regulator.c18
-rw-r--r--drivers/regulator/tps6287x-regulator.c2
-rw-r--r--drivers/regulator/tps6594-regulator.c31
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c3
-rw-r--r--drivers/remoteproc/imx_rproc.c73
-rw-r--r--drivers/remoteproc/imx_rproc.h2
-rw-r--r--drivers/remoteproc/omap_remoteproc.c3
-rw-r--r--drivers/remoteproc/pru_rproc.c25
-rw-r--r--drivers/remoteproc/qcom_common.c21
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c26
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c87
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c102
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c3
-rw-r--r--drivers/remoteproc/qcom_sysmon.c1
-rw-r--r--drivers/remoteproc/qcom_wcnss.c30
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c1
-rw-r--r--drivers/remoteproc/rcar_rproc.c3
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c2
-rw-r--r--drivers/remoteproc/remoteproc_internal.h4
-rw-r--r--drivers/remoteproc/st_slim_rproc.c1
-rw-r--r--drivers/remoteproc/stm32_rproc.c16
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c3
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c61
-rw-r--r--drivers/rpmsg/rpmsg_char.c54
-rw-r--r--drivers/rpmsg/rpmsg_core.c21
-rw-r--r--drivers/rpmsg/rpmsg_internal.h2
-rw-r--r--drivers/rtc/Kconfig5
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-abx80x.c2
-rw-r--r--drivers/rtc/rtc-armada38x.c5
-rw-r--r--drivers/rtc/rtc-aspeed.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c3
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-cmos.c11
-rw-r--r--drivers/rtc/rtc-cros-ec.c38
-rw-r--r--drivers/rtc/rtc-da9063.c7
-rw-r--r--drivers/rtc/rtc-ds1305.c5
-rw-r--r--drivers/rtc/rtc-ds1307.c2
-rw-r--r--drivers/rtc/rtc-ds1742.c1
-rw-r--r--drivers/rtc/rtc-ds2404.c169
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c5
-rw-r--r--drivers/rtc/rtc-isl12022.c126
-rw-r--r--drivers/rtc/rtc-isl12026.c5
-rw-r--r--drivers/rtc/rtc-isl1208.c19
-rw-r--r--drivers/rtc/rtc-jz4740.c4
-rw-r--r--drivers/rtc/rtc-lpc24xx.c3
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-m48t86.c8
-rw-r--r--drivers/rtc/rtc-mpc5121.c4
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/rtc/rtc-mt7622.c4
-rw-r--r--drivers/rtc/rtc-mxc.c1
-rw-r--r--drivers/rtc/rtc-nct3018y.c2
-rw-r--r--drivers/rtc/rtc-omap.c8
-rw-r--r--drivers/rtc/rtc-pcf2127.c872
-rw-r--r--drivers/rtc/rtc-pcf85063.c101
-rw-r--r--drivers/rtc/rtc-pcf85363.c31
-rw-r--r--drivers/rtc/rtc-pxa.c1
-rw-r--r--drivers/rtc/rtc-rs5c372.c5
-rw-r--r--drivers/rtc/rtc-rv3028.c82
-rw-r--r--drivers/rtc/rtc-rv3032.c2
-rw-r--r--drivers/rtc/rtc-rv8803.c5
-rw-r--r--drivers/rtc/rtc-rx6110.c1
-rw-r--r--drivers/rtc/rtc-rx8581.c1
-rw-r--r--drivers/rtc/rtc-rzn1.c5
-rw-r--r--drivers/rtc/rtc-s3c.c1
-rw-r--r--drivers/rtc/rtc-stm32.c143
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c1
-rw-r--r--drivers/rtc/rtc-sun6i.c3
-rw-r--r--drivers/rtc/rtc-sunplus.c2
-rw-r--r--drivers/rtc/rtc-sunxi.c2
-rw-r--r--drivers/rtc/rtc-ti-k3.c2
-rw-r--r--drivers/rtc/rtc-tps6586x.c1
-rw-r--r--drivers/rtc/rtc-tps65910.c7
-rw-r--r--drivers/rtc/rtc-twl.c43
-rw-r--r--drivers/rtc/rtc-wm8350.c9
-rw-r--r--drivers/s390/block/dasd_devmap.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c10
-rw-r--r--drivers/s390/block/dasd_int.h4
-rw-r--r--drivers/s390/block/dcssblk.c13
-rw-r--r--drivers/s390/char/monreader.c12
-rw-r--r--drivers/s390/cio/airq.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c11
-rw-r--r--drivers/s390/scsi/zfcp_aux.c9
-rw-r--r--drivers/s390/virtio/virtio_ccw.c1
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aic7xxx/aicasm/Makefile18
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c7
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c12
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c4
-rw-r--r--drivers/scsi/bfa/bfa_fc.h2
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c2
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c20
-rw-r--r--drivers/scsi/fnic/fnic.h5
-rw-r--r--drivers/scsi/fnic/fnic_io.h2
-rw-r--r--drivers/scsi/fnic/fnic_main.c2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c115
-rw-r--r--drivers/scsi/gvp11.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c16
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c9
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c14
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c27
-rw-r--r--drivers/scsi/hosts.c4
-rw-r--r--drivers/scsi/isci/host.h2
-rw-r--r--drivers/scsi/isci/init.c9
-rw-r--r--drivers/scsi/isci/phy.c2
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c4
-rw-r--r--drivers/scsi/libsas/sas_ata.c21
-rw-r--r--drivers/scsi/libsas/sas_discover.c10
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c4
-rw-r--r--drivers/scsi/libsas/sas_init.c16
-rw-r--r--drivers/scsi/libsas/sas_internal.h7
-rw-r--r--drivers/scsi/libsas/sas_phy.c8
-rw-r--r--drivers/scsi/libsas/sas_port.c8
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c54
-rw-r--r--drivers/scsi/lpfc/lpfc.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c136
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c70
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c78
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c53
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c94
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c23
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h23
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c37
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c155
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c50
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mvsas/mv_init.c7
-rw-r--r--drivers/scsi/mvsas/mv_sas.c9
-rw-r--r--drivers/scsi/mvumi.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c14
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h2
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c56
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c31
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h2
-rw-r--r--drivers/scsi/pmcraid.c3
-rw-r--r--drivers/scsi/ppa.c84
-rw-r--r--drivers/scsi/ppa.h4
-rw-r--r--drivers/scsi/qedf/qedf.h1
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c35
-rw-r--r--drivers/scsi/qedf/qedf_io.c10
-rw-r--r--drivers/scsi/qedf/qedf_main.c7
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h66
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h25
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c251
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h59
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c322
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c177
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c407
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c86
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c15
-rw-r--r--drivers/scsi/qlogicpti.c4
-rw-r--r--drivers/scsi/scsi.c11
-rw-r--r--drivers/scsi/scsi_debugfs.c26
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_scan.c22
-rw-r--r--drivers/scsi/scsi_sysfs.c4
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c80
-rw-r--r--drivers/scsi/sd.c167
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h16
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c256
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c9
-rw-r--r--drivers/scsi/sun_esp.c3
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/scsi/xen-scsifront.c6
-rw-r--r--drivers/soc/imx/soc-imx8m.c10
-rw-r--r--drivers/soc/loongson/Kconfig1
-rw-r--r--drivers/soc/loongson/loongson2_guts.c6
-rw-r--r--drivers/soc/loongson/loongson2_pm.c7
-rw-r--r--drivers/soc/renesas/Kconfig5
-rw-r--r--drivers/soundwire/bus.c18
-rw-r--r--drivers/soundwire/intel_ace2x.c283
-rw-r--r--drivers/soundwire/intel_auxdevice.c112
-rw-r--r--drivers/soundwire/qcom.c1
-rw-r--r--drivers/spi/spi-cs42l43.c1
-rw-r--r--drivers/spi/spi-gxp.c2
-rw-r--r--drivers/spi/spi-imx.c2
-rw-r--r--drivers/spi/spi-intel-pci.c1
-rw-r--r--drivers/spi/spi-nxp-fspi.c7
-rw-r--r--drivers/spi/spi-stm32.c8
-rw-r--r--drivers/spi/spi-sun6i.c31
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c12
-rw-r--r--drivers/staging/greybus/pwm.c12
-rw-r--r--drivers/staging/media/atomisp/Kconfig2
-rw-r--r--drivers/staging/media/av7110/sp8870.c2
-rw-r--r--drivers/staging/media/tegra-video/vi.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c74
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c26
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/target_core_configfs.c24
-rw-r--r--drivers/target/target_core_device.c11
-rw-r--r--drivers/target/target_core_iblock.c7
-rw-r--r--drivers/target/target_core_transport.c1
-rw-r--r--drivers/tee/optee/optee_private.h2
-rw-r--r--drivers/tee/tee_private.h2
-rw-r--r--drivers/thermal/Kconfig12
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/armada_thermal.c5
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c1
-rw-r--r--drivers/thermal/broadcom/sr-thermal.c1
-rw-r--r--drivers/thermal/db8500_thermal.c2
-rw-r--r--drivers/thermal/dove_thermal.c4
-rw-r--r--drivers/thermal/imx8mm_thermal.c6
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c6
-rw-r--r--drivers/thermal/k3_bandgap.c1
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c2
-rw-r--r--drivers/thermal/kirkwood_thermal.c4
-rw-r--r--drivers/thermal/loongson2_thermal.c169
-rw-r--r--drivers/thermal/max77620_thermal.c2
-rw-r--r--drivers/thermal/mediatek/auxadc_thermal.c2
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c175
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c6
-rw-r--r--drivers/thermal/qcom/tsens-v1.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/thermal/spear_thermal.c4
-rw-r--r--drivers/thermal/sun8i_thermal.c8
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c52
-rw-r--r--drivers/thermal/thermal-generic-adc.c1
-rw-r--r--drivers/thermal/thermal_core.c24
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/thermal/thermal_helpers.c3
-rw-r--r--drivers/thermal/thermal_of.c8
-rw-r--r--drivers/thermal/thermal_sysfs.c9
-rw-r--r--drivers/thermal/thermal_trip.c24
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c4
-rw-r--r--drivers/tty/n_gsm.c4
-rw-r--r--drivers/tty/serial/8250/8250_port.c5
-rw-r--r--drivers/ufs/core/Kconfig8
-rw-r--r--drivers/ufs/core/Makefile1
-rw-r--r--drivers/ufs/core/ufs-hwmon.c3
-rw-r--r--drivers/ufs/core/ufs-mcq.c34
-rw-r--r--drivers/ufs/core/ufs-sysfs.c22
-rw-r--r--drivers/ufs/core/ufs_bsg.c5
-rw-r--r--drivers/ufs/core/ufshcd-crypto.h20
-rw-r--r--drivers/ufs/core/ufshcd-priv.h4
-rw-r--r--drivers/ufs/core/ufshcd.c650
-rw-r--r--drivers/ufs/core/ufshpb.c2668
-rw-r--r--drivers/ufs/core/ufshpb.h318
-rw-r--r--drivers/ufs/host/cdns-pltfrm.c27
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pci.c2
-rw-r--r--drivers/ufs/host/tc-dwc-g210.c32
-rw-r--r--drivers/ufs/host/ti-j721e-ufs.c2
-rw-r--r--drivers/ufs/host/ufs-mediatek.c180
-rw-r--r--drivers/ufs/host/ufs-mediatek.h33
-rw-r--r--drivers/ufs/host/ufs-qcom.c174
-rw-r--r--drivers/ufs/host/ufs-qcom.h4
-rw-r--r--drivers/ufs/host/ufs-renesas.c2
-rw-r--r--drivers/ufs/host/ufshcd-dwc.c22
-rw-r--r--drivers/ufs/host/ufshcd-pci.c3
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c13
-rw-r--r--drivers/usb/dwc3/dwc3-octeon.c2
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c3
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c8
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c3
-rw-r--r--drivers/vfio/pci/pds/Kconfig2
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c2
-rw-r--r--drivers/vhost/vdpa.c15
-rw-r--r--drivers/vhost/vringh.c12
-rw-r--r--drivers/video/backlight/gpio_backlight.c3
-rw-r--r--drivers/video/backlight/led_bl.c2
-rw-r--r--drivers/video/backlight/lp855x_bl.c33
-rw-r--r--drivers/video/backlight/qcom-wled.c2
-rw-r--r--drivers/video/console/Kconfig1
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/core/Kconfig2
-rw-r--r--drivers/video/fbdev/g364fb.c2
-rw-r--r--drivers/virtio/virtio_ring.c412
-rw-r--r--drivers/virtio/virtio_vdpa.c17
-rw-r--r--drivers/w1/masters/ds2482.c2
-rw-r--r--drivers/watchdog/Kconfig95
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c1
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c3
-rw-r--r--drivers/watchdog/cpwd.c2
-rw-r--r--drivers/watchdog/ftwdt010_wdt.c6
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/imx2_wdt.c5
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c1
-rw-r--r--drivers/watchdog/intel-mid_wdt.c1
-rw-r--r--drivers/watchdog/lantiq_wdt.c3
-rw-r--r--drivers/watchdog/loongson1_wdt.c1
-rw-r--r--drivers/watchdog/marvell_gti_wdt.c340
-rw-r--r--drivers/watchdog/menz69_wdt.c1
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c21
-rw-r--r--drivers/watchdog/meson_wdt.c4
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c4
-rw-r--r--drivers/watchdog/mtk_wdt.c1
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c3
-rw-r--r--drivers/watchdog/pic32-dmt.c1
-rw-r--r--drivers/watchdog/pic32-wdt.c1
-rw-r--r--drivers/watchdog/pika_wdt.c2
-rw-r--r--drivers/watchdog/pm8916_wdt.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c1
-rw-r--r--drivers/watchdog/rave-sp-wdt.c2
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/rti_wdt.c48
-rw-r--r--drivers/watchdog/rza_wdt.c4
-rw-r--r--drivers/watchdog/rzg2l_wdt.c2
-rw-r--r--drivers/watchdog/s3c2410_wdt.c16
-rw-r--r--drivers/watchdog/sama5d4_wdt.c8
-rw-r--r--drivers/watchdog/sbsa_gwdt.c3
-rw-r--r--drivers/watchdog/starfive-wdt.c13
-rw-r--r--drivers/watchdog/stm32_iwdg.c3
-rw-r--r--drivers/watchdog/sunxi_wdt.c1
-rw-r--r--drivers/watchdog/watchdog_core.c2
-rw-r--r--drivers/watchdog/xilinx_wwdt.c7
-rw-r--r--drivers/xen/events/events_base.c108
-rw-r--r--drivers/xen/platform-pci.c2
1804 files changed, 33716 insertions, 20773 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 496ca02ee18f..efb66e25fa2d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -15,6 +15,8 @@ source "drivers/base/Kconfig"
source "drivers/bus/Kconfig"
+source "drivers/cache/Kconfig"
+
source "drivers/connector/Kconfig"
source "drivers/firmware/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 0957f63ecb42..1bec7819a837 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -11,6 +11,7 @@ ifdef building_out_of_srctree
MAKEFLAGS += --include-dir=$(srctree)
endif
+obj-y += cache/
obj-y += irqchip/
obj-y += bus/
@@ -45,7 +46,7 @@ obj-$(CONFIG_DMADEVICES) += dma/
# SOC specific infrastructure drivers.
obj-y += soc/
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += genpd/
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += pmdomain/
obj-y += virtio/
obj-$(CONFIG_VDPA) += vdpa/
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index ba79f397c9e8..467a60235370 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -327,7 +327,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
}
if (!ret)
- ivpu_info(vdev, "VPU ready message received successfully\n");
+ ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
else
ivpu_hw_diagnose_failure(vdev);
@@ -634,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ }
};
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 9e8c075fe9ef..03b3d6532fb6 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -23,6 +23,7 @@
#define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d
+#define PCI_DEVICE_ID_ARL 0xad1d
#define PCI_DEVICE_ID_LNL 0x643e
#define IVPU_HW_37XX 37
@@ -165,6 +166,7 @@ static inline int ivpu_hw_gen(struct ivpu_device *vdev)
{
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_MTL:
+ case PCI_DEVICE_ID_ARL:
return IVPU_HW_37XX;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_40XX;
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 9827ea4d7b83..0191cf8e5964 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -220,7 +220,8 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
if (ret)
return ret;
- fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
+ fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
+ DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
if (!fw->mem) {
ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
return -ENOMEM;
@@ -330,7 +331,7 @@ int ivpu_fw_load(struct ivpu_device *vdev)
memset(start, 0, size);
}
- wmb(); /* Flush WC buffers after writing fw->mem */
+ clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
return 0;
}
@@ -432,6 +433,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
if (!ivpu_fw_is_cold_boot(vdev)) {
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
+ clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
return;
}
@@ -493,7 +495,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
- wmb(); /* Flush WC buffers after writing bootparams */
+ clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
ivpu_fw_boot_params_print(vdev, boot_params);
}
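
The two wmb() barriers removed above were only enough while the firmware buffer was mapped write-combined; now that it is allocated DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP, the CPU cache must be flushed explicitly before the non-snooping device reads the memory. A minimal sketch of that pattern, assuming a hypothetical cached, non-snooped buffer buf of len bytes on x86, where clflush_cache_range() is available:

    #include <linux/string.h>
    #include <asm/cacheflush.h>     /* clflush_cache_range(), x86 */

    static void publish_to_device(void *buf, unsigned int len)
    {
            /* CPU stores land in the cache; the device does not snoop it. */
            memset(buf, 0, len);

            /* Write the cache lines back so the device sees current data. */
            clflush_cache_range(buf, len);
    }
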
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index 6b0ceda5f253..f4130586ff1b 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -8,6 +8,8 @@
#include <drm/drm_gem.h>
#include <drm/drm_mm.h>
+#define DRM_IVPU_BO_NOSNOOP 0x10000000
+
struct dma_buf;
struct ivpu_bo_ops;
struct ivpu_file_priv;
@@ -83,6 +85,9 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
+ if (bo->flags & DRM_IVPU_BO_NOSNOOP)
+ return false;
+
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index 34626d66fa10..8bdb59a45da6 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -57,8 +57,7 @@
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
- (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \
@@ -196,6 +195,14 @@ static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
+static int ivpu_wait_for_clock_own_resource_ack(struct ivpu_device *vdev)
+{
+ if (ivpu_is_simics(vdev))
+ return 0;
+
+ return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
+}
+
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
@@ -556,6 +563,12 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{
int ret;
+ ret = ivpu_wait_for_clock_own_resource_ack(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for clock own resource ACK\n");
+ return ret;
+ }
+
ivpu_boot_pwr_island_trickle_drive(vdev, true);
ivpu_boot_pwr_island_drive(vdev, true);
@@ -1046,8 +1059,6 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
if (status == 0)
return IRQ_NONE;
- REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
-
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
@@ -1092,6 +1103,9 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
schedule_recovery = true;
}
+ /* This must be done after interrupts are cleared at the source. */
+ REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
+
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
@@ -1103,9 +1117,14 @@ static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
struct ivpu_device *vdev = ptr;
irqreturn_t ret = IRQ_NONE;
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
+ /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
if (ret & IRQ_WAKE_THREAD)
return IRQ_WAKE_THREAD;
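
The handler changes above follow a common MSI ordering: mask the global interrupt, service and clear each source's status, then unmask so that any event which arrived while the handler ran re-asserts the message-signalled interrupt instead of being lost. A rough sketch of that ordering, where my_dev, reg_write() and handle_sources() are hypothetical placeholders, not driver or kernel APIs:

    #include <linux/interrupt.h>

    static irqreturn_t example_irq_handler(int irq, void *data)
    {
            struct my_dev *dev = data;              /* hypothetical device */
            irqreturn_t ret;

            reg_write(dev, GLOBAL_INT_MASK, 0x1);   /* block new MSIs while handling */

            ret = handle_sources(dev);              /* clear status at each source */

            /*
             * Unmask last: a source that became pending during handling
             * now re-triggers the MSI instead of staying silently latched.
             */
            reg_write(dev, GLOBAL_INT_MASK, 0x0);

            return ret;
    }
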
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
index 5139cfe88532..ff4a5d4f5821 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -70,6 +70,8 @@
#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index fa0af59e39ab..295c0d7b5039 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -209,10 +209,10 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_rx_msg *rx_msg;
int wait_ret, ret = 0;
- wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
- (IS_KTHREAD() && kthread_should_stop()) ||
- !list_empty(&cons->rx_msg_list),
- msecs_to_jiffies(timeout_ms));
+ wait_ret = wait_event_timeout(cons->rx_msg_wq,
+ (IS_KTHREAD() && kthread_should_stop()) ||
+ !list_empty(&cons->rx_msg_list),
+ msecs_to_jiffies(timeout_ms));
if (IS_KTHREAD() && kthread_should_stop())
return -EINTR;
@@ -220,9 +220,6 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
if (wait_ret == 0)
return -ETIMEDOUT;
- if (wait_ret < 0)
- return -ERESTARTSYS;
-
spin_lock_irq(&cons->rx_msg_lock);
rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
if (!rx_msg) {
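
Dropping the -ERESTARTSYS branch above is safe because wait_event_timeout(), unlike the _interruptible variant, cannot be interrupted by signals: it returns 0 if the condition is still false when the timeout expires, and otherwise the number of jiffies left (at least 1). A short sketch of checking its result, with wq and done standing in for a caller's wait queue and condition:

    long ret;

    ret = wait_event_timeout(wq, done, msecs_to_jiffies(timeout_ms));
    if (ret == 0)
            return -ETIMEDOUT;      /* condition never became true */
    /* ret > 0: condition is true, ret is the remaining time in jiffies */
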
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 831bfd2b2d39..bdddef2c59ee 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -118,8 +118,7 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size
struct vpu_jsm_msg resp;
int ret;
- if (!strncpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN - 1))
- return -ENOMEM;
+ strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
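
The strncpy() check removed above could never fire (strncpy() returns its destination pointer, never NULL), and strncpy() does not guarantee NUL-termination either. strscpy() always terminates the destination and returns the number of characters copied, or -E2BIG if the source had to be truncated. A minimal usage sketch with a hypothetical fixed-size destination buffer:

    #include <linux/string.h>

    char dst[VPU_DYNDBG_CMD_MAX_LEN];
    ssize_t n;

    n = strscpy(dst, command, sizeof(dst));
    if (n < 0)
            pr_warn("dyndbg command truncated\n");  /* dst is still NUL-terminated */
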
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 948e31f7ce6e..b411948594ff 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2057,7 +2057,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
!auto_detect)
acpi_video_bus_register_backlight(video);
- acpi_video_bus_add_notify_handler(video);
+ error = acpi_video_bus_add_notify_handler(video);
+ if (error)
+ goto err_del;
error = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_video_bus_notify);
@@ -2067,10 +2069,11 @@ static int acpi_video_bus_add(struct acpi_device *device)
return 0;
err_remove:
+ acpi_video_bus_remove_notify_handler(video);
+err_del:
mutex_lock(&video_list_lock);
list_del(&video->entry);
mutex_unlock(&video_list_lock);
- acpi_video_bus_remove_notify_handler(video);
acpi_video_bus_unregister_backlight(video);
err_put_video:
acpi_video_bus_put_devices(video);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f0e6738ae3c9..f96bf32cd368 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -855,7 +855,7 @@ static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
if (idt->header.length < sizeof(*idt))
return 0;
- return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
+ return sizeof(*idt) + sizeof(u32) * idt->line_count;
}
static bool add_idt(struct acpi_nfit_desc *acpi_desc,
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index dc615ef6550a..3a34a8c425fe 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1217,8 +1217,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
state->exit_latency = lpi->wake_latency;
state->target_residency = lpi->min_residency;
- if (lpi->arch_flags)
- state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ state->flags |= arch_get_idle_state_flags(lpi->arch_flags);
if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
state->flags |= CPUIDLE_FLAG_RCU_IDLE;
state->enter = acpi_idle_lpi_enter;
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 1a8591e9a9bf..994091bd52de 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -19,6 +19,7 @@ static void acpi_set_pdc_bits(u32 *buf)
{
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
+ buf[2] = 0;
/* Twiddle arch-specific bits needed for _PDC */
arch_acpi_set_proc_cap_bits(&buf[2]);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 531a9e3df717..691d4b7686ee 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1584,7 +1584,7 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
* If we have reason to believe the IOMMU driver missed the initial
* iommu_probe_device() call for dev, replay it to get things in order.
*/
- if (!err && dev->bus && !device_iommu_mapped(dev))
+ if (!err && dev->bus)
err = iommu_probe_device(dev);
/* Ignore all other errors apart from EPROBE_DEFER */
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 419590f41ed5..312730f8272e 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -492,26 +492,22 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
}
static int thermal_get_trend(struct thermal_zone_device *thermal,
- int trip_index, enum thermal_trend *trend)
+ const struct thermal_trip *trip,
+ enum thermal_trend *trend)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
struct acpi_thermal_trip *acpi_trip;
- int t, i;
+ int t;
- if (!tz || trip_index < 0)
+ if (!tz || !trip)
return -EINVAL;
- if (tz->trips.critical.valid)
- trip_index--;
-
- if (tz->trips.hot.valid)
- trip_index--;
-
- if (trip_index < 0)
+ acpi_trip = trip->priv;
+ if (!acpi_trip || !acpi_trip->valid)
return -EINVAL;
- acpi_trip = &tz->trips.passive.trip;
- if (acpi_trip->valid && !trip_index--) {
+ switch (trip->type) {
+ case THERMAL_TRIP_PASSIVE:
t = tz->trips.passive.tc1 * (tz->temperature -
tz->last_temperature) +
tz->trips.passive.tc2 * (tz->temperature -
@@ -524,19 +520,18 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
*trend = THERMAL_TREND_STABLE;
return 0;
- }
-
- t = acpi_thermal_temp(tz, tz->temperature);
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- acpi_trip = &tz->trips.active[i].trip;
- if (acpi_trip->valid && !trip_index--) {
- if (t > acpi_thermal_temp(tz, acpi_trip->temperature)) {
- *trend = THERMAL_TREND_RAISING;
- return 0;
- }
+ case THERMAL_TRIP_ACTIVE:
+ t = acpi_thermal_temp(tz, tz->temperature);
+ if (t <= trip->temperature)
break;
- }
+
+ *trend = THERMAL_TREND_RAISING;
+
+ return 0;
+
+ default:
+ break;
}
return -EINVAL;
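
After this conversion, the .get_trend() callback is handed the struct thermal_trip itself instead of a trip index, and the ACPI driver recovers its own per-trip state through trip->priv. A sketch of the shape this gives a driver callback, where struct my_trip and its rising field are hypothetical driver-private data, not thermal-core API:

    static int my_get_trend(struct thermal_zone_device *tzd,
                            const struct thermal_trip *trip,
                            enum thermal_trend *trend)
    {
            struct my_trip *mt = trip->priv;        /* set by the driver at registration */

            if (!mt)
                    return -EINVAL;

            *trend = mt->rising ? THERMAL_TREND_RAISING : THERMAL_TREND_STABLE;
            return 0;
    }
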
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index addba109406b..08745e7db820 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -421,6 +421,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
+ /* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
+ { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -807,7 +809,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -1881,6 +1883,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
+ if (!(hpriv->cap & HOST_CAP_PART))
+ host->flags |= ATA_HOST_NO_PART;
+
+ if (!(hpriv->cap & HOST_CAP_SSC))
+ host->flags |= ATA_HOST_NO_SSC;
+
+ if (!(hpriv->cap2 & HOST_CAP2_SDS))
+ host->flags |= ATA_HOST_NO_DEVSLP;
+
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index c2b6be083af4..64f7f7d6ba84 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include "ahci.h"
diff --git a/drivers/ata/ahci_dwc.c b/drivers/ata/ahci_dwc.c
index 9604a2f6ed48..ed263de3fd70 100644
--- a/drivers/ata/ahci_dwc.c
+++ b/drivers/ata/ahci_dwc.c
@@ -15,7 +15,7 @@
#include <linux/log2.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index 5083fb6c4927..adc851cd5578 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -11,6 +11,7 @@
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 764501518582..f3187351e8a6 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include "ahci.h"
diff --git a/drivers/ata/ahci_octeon.c b/drivers/ata/ahci_octeon.c
index e89807fa928e..9accf8923891 100644
--- a/drivers/ata/ahci_octeon.c
+++ b/drivers/ata/ahci_octeon.c
@@ -31,13 +31,11 @@ static int ahci_octeon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
void __iomem *base;
u64 cfg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 3d01b118c9a1..b1a4e57578e2 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -12,9 +12,7 @@
#include <linux/pm.h>
#include <linux/ahci_platform.h>
#include <linux/device.h>
-#include <linux/of_address.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include "ahci.h"
@@ -90,7 +88,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_qoriq_acpi_match);
static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
void __iomem *port_mmio = ahci_port_base(link->ap);
u32 px_cmd, px_is, px_val;
struct ata_port *ap = link->ap;
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index 2c32d58c6ae7..59f97aa7ac75 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/device.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
@@ -132,8 +131,7 @@ static const struct ata_port_info *ahci_seattle_get_port_info(
if (!plat_data)
return &ahci_port_info;
- plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 1));
+ plat_data->sgpio_ctrl = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(plat_data->sgpio_ctrl))
return &ahci_port_info;
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 04531fa95e40..58b2683954dd 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -13,8 +13,8 @@
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include "ahci.h"
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 21c20793e517..8703c2a4658b 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -12,7 +12,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -530,8 +530,7 @@ static int tegra_ahci_probe(struct platform_device *pdev)
tegra->pdev = pdev;
tegra->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- tegra->sata_regs = devm_ioremap_resource(&pdev->dev, res);
+ tegra->sata_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(tegra->sata_regs))
return PTR_ERR(tegra->sata_regs);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index eb773f2e28fc..ccef5e63bdf9 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -110,9 +110,8 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
* @timeout : timeout for achieving the value.
*/
static int xgene_ahci_poll_reg_val(struct ata_port *ap,
- void __iomem *reg, unsigned
- int val, unsigned long interval,
- unsigned long timeout)
+ void __iomem *reg, unsigned int val,
+ unsigned int interval, unsigned int timeout)
{
unsigned long deadline;
unsigned int tmp;
@@ -350,7 +349,7 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
static int xgene_ahci_do_hardreset(struct ata_link *link,
unsigned long deadline, bool *online)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct xgene_ahci_context *ctx = hpriv->plat_data;
@@ -755,20 +754,17 @@ static int xgene_ahci_probe(struct platform_device *pdev)
ctx->dev = dev;
/* Retrieve the IP core resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ctx->csr_core = devm_ioremap_resource(dev, res);
+ ctx->csr_core = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctx->csr_core))
return PTR_ERR(ctx->csr_core);
/* Retrieve the IP diagnostic resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- ctx->csr_diag = devm_ioremap_resource(dev, res);
+ ctx->csr_diag = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(ctx->csr_diag))
return PTR_ERR(ctx->csr_diag);
/* Retrieve the IP AXI resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- ctx->csr_axi = devm_ioremap_resource(dev, res);
+ ctx->csr_axi = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(ctx->csr_axi))
return PTR_ERR(ctx->csr_axi);
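
The probe() conversions above collapse the usual two-step lookup into one call; devm_platform_ioremap_resource() fetches the MEM resource at the given index and maps it, with the same IS_ERR()/PTR_ERR() error handling as before. Side by side, the pattern looks like this (a sketch, not a full probe function):

    /* before: look up the resource, then map it */
    res  = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    base = devm_ioremap_resource(&pdev->dev, res);

    /* after: one call, identical error handling */
    base = devm_platform_ioremap_resource(pdev, 1);
    if (IS_ERR(base))
            return PTR_ERR(base);
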
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 06aec35f88f2..f1263364fa97 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1256,6 +1256,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
return sprintf(buf, "%d\n", emp->blink_policy);
}
+static void ahci_port_clear_pending_irq(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* clear SError */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+ writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
+}
+
static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
@@ -1270,18 +1290,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
if (rc)
dev_warn(dev, "%s (%d)\n", emsg, rc);
- /* clear SError */
- tmp = readl(port_mmio + PORT_SCR_ERR);
- dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
- writel(tmp, port_mmio + PORT_SCR_ERR);
-
- /* clear port IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
- if (tmp)
- writel(tmp, port_mmio + PORT_IRQ_STAT);
-
- writel(1 << port_no, mmio + HOST_IRQ_STAT);
+ ahci_port_clear_pending_irq(ap);
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
@@ -1403,7 +1412,7 @@ EXPORT_SYMBOL_GPL(ahci_kick_engine);
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
struct ata_taskfile *tf, int is_cmd, u16 flags,
- unsigned long timeout_msec)
+ unsigned int timeout_msec)
{
const u32 cmd_fis_len = 5; /* five dwords */
struct ahci_port_priv *pp = ap->private_data;
@@ -1448,7 +1457,8 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
const char *reason = NULL;
- unsigned long now, msecs;
+ unsigned long now;
+ unsigned int msecs;
struct ata_taskfile tf;
bool fbs_disabled = false;
int rc;
@@ -1587,7 +1597,7 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline, bool *online)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -1602,6 +1612,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+ ahci_port_clear_pending_irq(ap);
+
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 9a8d43f54adc..581704e61f28 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -20,6 +20,7 @@
#include <linux/ahci_platform.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/reset.h>
#include "ahci.h"
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 04db0f2c683a..d8cc1e27a125 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1586,13 +1586,11 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
}
}
- if (ap->ops->error_handler)
- ata_eh_release(ap);
+ ata_eh_release(ap);
rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
- if (ap->ops->error_handler)
- ata_eh_acquire(ap);
+ ata_eh_acquire(ap);
ata_sff_flush_pio_task(ap);
@@ -1607,10 +1605,7 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
if (qc->flags & ATA_QCFLAG_ACTIVE) {
qc->err_mask |= AC_ERR_TIMEOUT;
- if (ap->ops->error_handler)
- ata_port_freeze(ap);
- else
- ata_qc_complete(qc);
+ ata_port_freeze(ap);
ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
timeout, command);
@@ -1978,6 +1973,96 @@ retry:
}
/**
+ * ata_dev_power_set_standby - Set a device power mode to standby
+ * @dev: target device
+ *
+ * Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
+ * For an HDD device, this spins down the disks.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_power_set_standby(struct ata_device *dev)
+{
+ unsigned long ap_flags = dev->link->ap->flags;
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /* Issue STANDBY IMMEDIATE command only if supported by the device */
+ if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
+ return;
+
+ /*
+ * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
+ * causing some drives to spin up and down again. For these, do nothing
+ * if we are being called on shutdown.
+ */
+ if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
+ system_state == SYSTEM_POWER_OFF)
+ return;
+
+ if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
+ system_entering_hibernation())
+ return;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_STANDBYNOW1;
+
+ ata_dev_notice(dev, "Entering standby power mode\n");
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask)
+ ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
+ err_mask);
+}
+
+/**
+ * ata_dev_power_set_active - Set a device power mode to active
+ * @dev: target device
+ *
+ * Issue a VERIFY command to ensure that the device is in the
+ * active power mode. For a spun-down HDD (standby or idle power mode),
+ * the VERIFY command will complete after the disk spins up.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_power_set_active(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /*
+ * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
+ * if supported by the device.
+ */
+ if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
+ return;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_VERIFY;
+ tf.nsect = 1;
+ if (dev->flags & ATA_DFLAG_LBA) {
+ tf.flags |= ATA_TFLAG_LBA;
+ tf.device |= ATA_LBA;
+ } else {
+ /* CHS */
+ tf.lbal = 0x1; /* sect */
+ }
+
+ ata_dev_notice(dev, "Entering active power mode\n");
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask)
+ ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
+ err_mask);
+}
+
+/**
* ata_read_log_page - read a specific log page
* @dev: target device
* @log: log to read
@@ -2534,7 +2619,7 @@ static int ata_dev_config_lba(struct ata_device *dev)
{
const u16 *id = dev->id;
const char *lba_desc;
- char ncq_desc[24];
+ char ncq_desc[32];
int ret;
dev->flags |= ATA_DFLAG_LBA;
@@ -3063,144 +3148,6 @@ int ata_cable_sata(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_cable_sata);
/**
- * ata_bus_probe - Reset and probe ATA bus
- * @ap: Bus to probe
- *
- * Master ATA bus probing function. Initiates a hardware-dependent
- * bus reset, then attempts to identify any devices found on
- * the bus.
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * Zero on success, negative errno otherwise.
- */
-
-int ata_bus_probe(struct ata_port *ap)
-{
- unsigned int classes[ATA_MAX_DEVICES];
- int tries[ATA_MAX_DEVICES];
- int rc;
- struct ata_device *dev;
-
- ata_for_each_dev(dev, &ap->link, ALL)
- tries[dev->devno] = ATA_PROBE_MAX_TRIES;
-
- retry:
- ata_for_each_dev(dev, &ap->link, ALL) {
- /* If we issue an SRST then an ATA drive (not ATAPI)
- * may change configuration and be in PIO0 timing. If
- * we do a hard reset (or are coming from power on)
- * this is true for ATA or ATAPI. Until we've set a
- * suitable controller mode we should not touch the
- * bus as we may be talking too fast.
- */
- dev->pio_mode = XFER_PIO_0;
- dev->dma_mode = 0xff;
-
- /* If the controller has a pio mode setup function
- * then use it to set the chipset to rights. Don't
- * touch the DMA setup as that will be dealt with when
- * configuring devices.
- */
- if (ap->ops->set_piomode)
- ap->ops->set_piomode(ap, dev);
- }
-
- /* reset and determine device classes */
- ap->ops->phy_reset(ap);
-
- ata_for_each_dev(dev, &ap->link, ALL) {
- if (dev->class != ATA_DEV_UNKNOWN)
- classes[dev->devno] = dev->class;
- else
- classes[dev->devno] = ATA_DEV_NONE;
-
- dev->class = ATA_DEV_UNKNOWN;
- }
-
- /* read IDENTIFY page and configure devices. We have to do the identify
- specific sequence bass-ackwards so that PDIAG- is released by
- the slave device */
-
- ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
- if (tries[dev->devno])
- dev->class = classes[dev->devno];
-
- if (!ata_dev_enabled(dev))
- continue;
-
- rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
- dev->id);
- if (rc)
- goto fail;
- }
-
- /* Now ask for the cable type as PDIAG- should have been released */
- if (ap->ops->cable_detect)
- ap->cbl = ap->ops->cable_detect(ap);
-
- /* We may have SATA bridge glue hiding here irrespective of
- * the reported cable types and sensed types. When SATA
- * drives indicate we have a bridge, we don't know which end
- * of the link the bridge is which is a problem.
- */
- ata_for_each_dev(dev, &ap->link, ENABLED)
- if (ata_id_is_sata(dev->id))
- ap->cbl = ATA_CBL_SATA;
-
- /* After the identify sequence we can now set up the devices. We do
- this in the normal order so that the user doesn't get confused */
-
- ata_for_each_dev(dev, &ap->link, ENABLED) {
- ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
- rc = ata_dev_configure(dev);
- ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
- if (rc)
- goto fail;
- }
-
- /* configure transfer mode */
- rc = ata_set_mode(&ap->link, &dev);
- if (rc)
- goto fail;
-
- ata_for_each_dev(dev, &ap->link, ENABLED)
- return 0;
-
- return -ENODEV;
-
- fail:
- tries[dev->devno]--;
-
- switch (rc) {
- case -EINVAL:
- /* eeek, something went very wrong, give up */
- tries[dev->devno] = 0;
- break;
-
- case -ENODEV:
- /* give it just one more chance */
- tries[dev->devno] = min(tries[dev->devno], 1);
- fallthrough;
- case -EIO:
- if (tries[dev->devno] == 1) {
- /* This is the last chance, better to slow
- * down than lose it.
- */
- sata_down_spd_limit(&ap->link, 0);
- ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
- }
- }
-
- if (!tries[dev->devno])
- ata_dev_disable(dev);
-
- goto retry;
-}
-
-/**
* sata_print_link_status - Print SATA link status
* @link: SATA link to printk link status about
*
@@ -3782,7 +3729,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
- const unsigned long *timing = sata_ehc_deb_timing(ehc);
+ const unsigned int *timing = sata_ehc_deb_timing(ehc);
int rc;
/* if we're about to do hardreset, nothing more to do */
@@ -3824,7 +3771,7 @@ EXPORT_SYMBOL_GPL(ata_std_prereset);
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
bool online;
int rc;
@@ -4213,10 +4160,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Micron_1100_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4874,126 +4823,100 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
void ata_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_device *dev = qc->dev;
+ struct ata_eh_info *ehi = &dev->link->eh_info;
/* Trigger the LED (if available) */
ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
- /* XXX: New EH and old EH use different mechanisms to
- * synchronize EH with regular execution path.
- *
- * In new EH, a qc owned by EH is marked with ATA_QCFLAG_EH.
- * Normal execution path is responsible for not accessing a
- * qc owned by EH. libata core enforces the rule by returning NULL
- * from ata_qc_from_tag() for qcs owned by EH.
+ /*
+ * In order to synchronize EH with the regular execution path, a qc that
+ * is owned by EH is marked with ATA_QCFLAG_EH.
*
- * Old EH depends on ata_qc_complete() nullifying completion
- * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
- * not synchronize with interrupt handler. Only PIO task is
- * taken care of.
+ * The normal execution path is responsible for not accessing a qc owned
+ * by EH. libata core enforces the rule by returning NULL from
+ * ata_qc_from_tag() for qcs owned by EH.
*/
- if (ap->ops->error_handler) {
- struct ata_device *dev = qc->dev;
- struct ata_eh_info *ehi = &dev->link->eh_info;
-
- if (unlikely(qc->err_mask))
- qc->flags |= ATA_QCFLAG_EH;
+ if (unlikely(qc->err_mask))
+ qc->flags |= ATA_QCFLAG_EH;
- /*
- * Finish internal commands without any further processing
- * and always with the result TF filled.
- */
- if (unlikely(ata_tag_internal(qc->tag))) {
- fill_result_tf(qc);
- trace_ata_qc_complete_internal(qc);
- __ata_qc_complete(qc);
- return;
- }
+ /*
+ * Finish internal commands without any further processing and always
+ * with the result TF filled.
+ */
+ if (unlikely(ata_tag_internal(qc->tag))) {
+ fill_result_tf(qc);
+ trace_ata_qc_complete_internal(qc);
+ __ata_qc_complete(qc);
+ return;
+ }
- /*
- * Non-internal qc has failed. Fill the result TF and
- * summon EH.
- */
- if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
- fill_result_tf(qc);
- trace_ata_qc_complete_failed(qc);
- ata_qc_schedule_eh(qc);
- return;
- }
+ /* Non-internal qc has failed. Fill the result TF and summon EH. */
+ if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
+ fill_result_tf(qc);
+ trace_ata_qc_complete_failed(qc);
+ ata_qc_schedule_eh(qc);
+ return;
+ }
- WARN_ON_ONCE(ata_port_is_frozen(ap));
+ WARN_ON_ONCE(ata_port_is_frozen(ap));
- /* read result TF if requested */
- if (qc->flags & ATA_QCFLAG_RESULT_TF)
- fill_result_tf(qc);
+ /* read result TF if requested */
+ if (qc->flags & ATA_QCFLAG_RESULT_TF)
+ fill_result_tf(qc);
- trace_ata_qc_complete_done(qc);
+ trace_ata_qc_complete_done(qc);
+ /*
+ * For CDL commands that completed without an error, check if we have
+ * sense data (ATA_SENSE is set). If we do, then the command may have
+ * been aborted by the device due to a limit timeout using the policy
+ * 0xD. For these commands, invoke EH to get the command sense data.
+ */
+ if (qc->flags & ATA_QCFLAG_HAS_CDL &&
+ qc->result_tf.status & ATA_SENSE) {
/*
- * For CDL commands that completed without an error, check if
- * we have sense data (ATA_SENSE is set). If we do, then the
- * command may have been aborted by the device due to a limit
- * timeout using the policy 0xD. For these commands, invoke EH
- * to get the command sense data.
+ * Tell SCSI EH to not overwrite scmd->result even if this
+ * command is finished with result SAM_STAT_GOOD.
*/
- if (qc->result_tf.status & ATA_SENSE &&
- ((ata_is_ncq(qc->tf.protocol) &&
- dev->flags & ATA_DFLAG_CDL_ENABLED) ||
- (!ata_is_ncq(qc->tf.protocol) &&
- ata_id_sense_reporting_enabled(dev->id)))) {
- /*
- * Tell SCSI EH to not overwrite scmd->result even if
- * this command is finished with result SAM_STAT_GOOD.
- */
- qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
- qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
- ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;
-
- /*
- * set pending so that ata_qc_schedule_eh() does not
- * trigger fast drain, and freeze the port.
- */
- ap->pflags |= ATA_PFLAG_EH_PENDING;
- ata_qc_schedule_eh(qc);
- return;
- }
+ qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
+ qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
+ ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;
- /* Some commands need post-processing after successful
- * completion.
+ /*
+ * set pending so that ata_qc_schedule_eh() does not trigger
+ * fast drain, and freeze the port.
*/
- switch (qc->tf.command) {
- case ATA_CMD_SET_FEATURES:
- if (qc->tf.feature != SETFEATURES_WC_ON &&
- qc->tf.feature != SETFEATURES_WC_OFF &&
- qc->tf.feature != SETFEATURES_RA_ON &&
- qc->tf.feature != SETFEATURES_RA_OFF)
- break;
- fallthrough;
- case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
- case ATA_CMD_SET_MULTI: /* multi_count changed */
- /* revalidate device */
- ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
- ata_port_schedule_eh(ap);
- break;
+ ap->pflags |= ATA_PFLAG_EH_PENDING;
+ ata_qc_schedule_eh(qc);
+ return;
+ }
- case ATA_CMD_SLEEP:
- dev->flags |= ATA_DFLAG_SLEEPING;
+ /* Some commands need post-processing after successful completion. */
+ switch (qc->tf.command) {
+ case ATA_CMD_SET_FEATURES:
+ if (qc->tf.feature != SETFEATURES_WC_ON &&
+ qc->tf.feature != SETFEATURES_WC_OFF &&
+ qc->tf.feature != SETFEATURES_RA_ON &&
+ qc->tf.feature != SETFEATURES_RA_OFF)
break;
- }
-
- if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
- ata_verify_xfer(qc);
+ fallthrough;
+ case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
+ case ATA_CMD_SET_MULTI: /* multi_count changed */
+ /* revalidate device */
+ ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
+ ata_port_schedule_eh(ap);
+ break;
- __ata_qc_complete(qc);
- } else {
- if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
- return;
+ case ATA_CMD_SLEEP:
+ dev->flags |= ATA_DFLAG_SLEEPING;
+ break;
+ }
- /* read result TF if failed or requested */
- if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
- fill_result_tf(qc);
+ if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
+ ata_verify_xfer(qc);
- __ata_qc_complete(qc);
- }
+ __ata_qc_complete(qc);
}
EXPORT_SYMBOL_GPL(ata_qc_complete);
@@ -5039,11 +4962,8 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
struct ata_link *link = qc->dev->link;
u8 prot = qc->tf.protocol;
- /* Make sure only one non-NCQ command is outstanding. The
- * check is skipped for old EH because it reuses active qc to
- * request ATAPI sense.
- */
- WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
+ /* Make sure only one non-NCQ command is outstanding. */
+ WARN_ON_ONCE(ata_tag_valid(link->active_tag));
if (ata_is_ncq(prot)) {
WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
@@ -5207,17 +5127,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
struct ata_link *link;
unsigned long flags;
- /* Previous resume operation might still be in
- * progress. Wait for PM_PENDING to clear.
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+ * A previous PM operation might still be in progress. Wait for
+ * ATA_PFLAG_PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ spin_lock_irqsave(ap->lock, flags);
}
- /* request PM ops to EH */
- spin_lock_irqsave(ap->lock, flags);
-
+ /* Request PM operation to EH */
ap->pm_mesg = mesg;
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
@@ -5229,10 +5151,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
- if (!async) {
+ if (!async)
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- }
}
/*
@@ -5248,11 +5168,27 @@ static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
+ /*
+ * We are about to suspend the port, so we do not care about
+ * scsi_rescan_device() calls scheduled by previous resume operations.
+ * The next resume will schedule the rescan again. So cancel any rescan
+ * that is not done yet.
+ */
+ cancel_delayed_work_sync(&ap->scsi_rescan_task);
+
ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
+ /*
+ * We are about to suspend the port, so we do not care about
+ * scsi_rescan_device() calls scheduled by previous resume operations.
+ * The next resume will schedule the rescan again. So cancel any rescan
+ * that is not done yet.
+ */
+ cancel_delayed_work_sync(&ap->scsi_rescan_task);
+
ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
@@ -5399,7 +5335,7 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
const struct device_type ata_port_type = {
- .name = "ata_port",
+ .name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif
@@ -5896,7 +5832,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
}
EXPORT_SYMBOL_GPL(ata_host_init);
-void __ata_port_probe(struct ata_port *ap)
+void ata_port_probe(struct ata_port *ap)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
@@ -5914,20 +5850,7 @@ void __ata_port_probe(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
}
-
-int ata_port_probe(struct ata_port *ap)
-{
- int rc = 0;
-
- if (ap->ops->error_handler) {
- __ata_port_probe(ap);
- ata_port_wait_eh(ap);
- } else {
- rc = ata_bus_probe(ap);
- }
- return rc;
-}
-
+EXPORT_SYMBOL_GPL(ata_port_probe);
static void async_port_probe(void *data, async_cookie_t cookie)
{
@@ -5943,7 +5866,8 @@ static void async_port_probe(void *data, async_cookie_t cookie)
if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
async_synchronize_cookie(cookie);
- (void)ata_port_probe(ap);
+ ata_port_probe(ap);
+ ata_port_wait_eh(ap);
/* in order to keep device order, we need to synchronize at this point */
async_synchronize_cookie(cookie);
@@ -6130,14 +6054,30 @@ static void ata_port_detach(struct ata_port *ap)
struct ata_link *link;
struct ata_device *dev;
- if (!ap->ops->error_handler)
- goto skip_eh;
+ /* Wait for any ongoing EH */
+ ata_port_wait_eh(ap);
- /* tell EH we're leaving & flush EH */
+ mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
+
+ /* Remove scsi devices */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ALL) {
+ if (dev->sdev) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ scsi_remove_device(dev->sdev);
+ spin_lock_irqsave(ap->lock, flags);
+ dev->sdev = NULL;
+ }
+ }
+ }
+
+ /* Tell EH to disable all devices */
ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap);
+
spin_unlock_irqrestore(ap->lock, flags);
+ mutex_unlock(&ap->scsi_scan_mutex);
/* wait till EH commits suicide */
ata_port_wait_eh(ap);
@@ -6148,7 +6088,6 @@ static void ata_port_detach(struct ata_port *ap)
cancel_delayed_work_sync(&ap->hotplug_task);
cancel_delayed_work_sync(&ap->scsi_rescan_task);
- skip_eh:
/* clean up zpodd on port removal */
ata_for_each_link(link, ap, HOST_FIRST) {
ata_for_each_dev(dev, link, ALL) {
@@ -6684,7 +6623,7 @@ EXPORT_SYMBOL_GPL(ata_msleep);
* The final register value.
*/
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
- unsigned long interval, unsigned long timeout)
+ unsigned int interval, unsigned int timeout)
{
unsigned long deadline;
u32 tmp;
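
For illustration only (this sketch is not part of the diff): the two helpers added above, ata_dev_power_set_standby() and ata_dev_power_set_active(), are driven from the EH suspend/resume and detach paths changed in libata-eh.c below. The sketch assumes EH (sleepable) context, the iterators from <linux/libata.h>, and the new declarations that this series adds to the drivers/ata internal header; example_port_power_transition() is a hypothetical name used only here.

#include <linux/libata.h>
#include "libata.h"	/* drivers/ata internal header; declares the new helpers */

/*
 * Illustration only: put every enabled device on a port into standby
 * (spinning down HDDs) or wake it back up, mirroring how EH uses the
 * new helpers on suspend and resume respectively.
 */
static void example_port_power_transition(struct ata_port *ap, bool suspend)
{
	struct ata_link *link;
	struct ata_device *dev;

	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (suspend)
				ata_dev_power_set_standby(dev);	/* STANDBY IMMEDIATE */
			else
				ata_dev_power_set_active(dev);	/* VERIFY, waits for spin-up */
		}
	}
}
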
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 35e03679b0bf..5686353e442c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -78,12 +78,12 @@ enum {
* are mostly for error handling, hotplug and those outlier devices that
* take an exceptionally long time to recover from reset.
*/
-static const unsigned long ata_eh_reset_timeouts[] = {
+static const unsigned int ata_eh_reset_timeouts[] = {
10000, /* most drives spin up by 10sec */
10000, /* > 99% working drives spin up before 20sec */
35000, /* give > 30 secs of idleness for outlier devices */
5000, /* and sweet one last chance */
- ULONG_MAX, /* > 1 min has elapsed, give up */
+ UINT_MAX, /* > 1 min has elapsed, give up */
};
static const unsigned int ata_eh_identify_timeouts[] = {
@@ -147,6 +147,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
.timeouts = ata_eh_flush_timeouts },
+ { .commands = CMDS(ATA_CMD_VERIFY),
+ .timeouts = ata_eh_reset_timeouts },
};
#undef CMDS
@@ -498,7 +500,19 @@ static void ata_eh_unload(struct ata_port *ap)
struct ata_device *dev;
unsigned long flags;
- /* Restore SControl IPM and SPD for the next driver and
+ /*
+ * Unless we are restarting, transition all enabled devices to
+ * standby power mode.
+ */
+ if (system_state != SYSTEM_RESTART) {
+ ata_for_each_link(link, ap, PMP_FIRST) {
+ ata_for_each_dev(dev, link, ENABLED)
+ ata_dev_power_set_standby(dev);
+ }
+ }
+
+ /*
+ * Restore SControl IPM and SPD for the next driver and
* disable attached devices.
*/
ata_for_each_link(link, ap, PMP_FIRST) {
@@ -571,13 +585,10 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
/* make sure sff pio task is not running */
ata_sff_flush_pio_task(ap);
- if (!ap->ops->error_handler)
- return;
-
/* synchronize with host lock and sort out timeouts */
/*
- * For new EH, all qcs are finished in one of three ways -
+ * For EH, all qcs are finished in one of three ways -
* normal completion, error completion, and SCSI timeout.
* Both completions can race against SCSI timeout. When normal
* completion wins, the qc never reaches EH. When error
@@ -659,100 +670,99 @@ EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
unsigned long flags;
+ struct ata_link *link;
- /* invoke error handler */
- if (ap->ops->error_handler) {
- struct ata_link *link;
-
- /* acquire EH ownership */
- ata_eh_acquire(ap);
+ /* acquire EH ownership */
+ ata_eh_acquire(ap);
repeat:
- /* kill fast drain timer */
- del_timer_sync(&ap->fastdrain_timer);
+ /* kill fast drain timer */
+ del_timer_sync(&ap->fastdrain_timer);
- /* process port resume request */
- ata_eh_handle_port_resume(ap);
+ /* process port resume request */
+ ata_eh_handle_port_resume(ap);
- /* fetch & clear EH info */
- spin_lock_irqsave(ap->lock, flags);
+ /* fetch & clear EH info */
+ spin_lock_irqsave(ap->lock, flags);
- ata_for_each_link(link, ap, HOST_FIRST) {
- struct ata_eh_context *ehc = &link->eh_context;
- struct ata_device *dev;
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev;
- memset(&link->eh_context, 0, sizeof(link->eh_context));
- link->eh_context.i = link->eh_info;
- memset(&link->eh_info, 0, sizeof(link->eh_info));
+ memset(&link->eh_context, 0, sizeof(link->eh_context));
+ link->eh_context.i = link->eh_info;
+ memset(&link->eh_info, 0, sizeof(link->eh_info));
- ata_for_each_dev(dev, link, ENABLED) {
- int devno = dev->devno;
+ ata_for_each_dev(dev, link, ENABLED) {
+ int devno = dev->devno;
- ehc->saved_xfer_mode[devno] = dev->xfer_mode;
- if (ata_ncq_enabled(dev))
- ehc->saved_ncq_enabled |= 1 << devno;
- }
+ ehc->saved_xfer_mode[devno] = dev->xfer_mode;
+ if (ata_ncq_enabled(dev))
+ ehc->saved_ncq_enabled |= 1 << devno;
+
+ /* If we are resuming, wake up the device */
+ if (ap->pflags & ATA_PFLAG_RESUMING)
+ ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
}
+ }
- ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
- ap->pflags &= ~ATA_PFLAG_EH_PENDING;
- ap->excl_link = NULL; /* don't maintain exclusion over EH */
+ ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
+ ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ ap->excl_link = NULL; /* don't maintain exclusion over EH */
- spin_unlock_irqrestore(ap->lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
- /* invoke EH, skip if unloading or suspended */
- if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
- ap->ops->error_handler(ap);
- else {
- /* if unloading, commence suicide */
- if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
- !(ap->pflags & ATA_PFLAG_UNLOADED))
- ata_eh_unload(ap);
- ata_eh_finish(ap);
- }
+ /* invoke EH, skip if unloading or suspended */
+ if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
+ ap->ops->error_handler(ap);
+ else {
+ /* if unloading, commence suicide */
+ if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
+ !(ap->pflags & ATA_PFLAG_UNLOADED))
+ ata_eh_unload(ap);
+ ata_eh_finish(ap);
+ }
- /* process port suspend request */
- ata_eh_handle_port_suspend(ap);
+ /* process port suspend request */
+ ata_eh_handle_port_suspend(ap);
- /* Exception might have happened after ->error_handler
- * recovered the port but before this point. Repeat
- * EH in such case.
- */
- spin_lock_irqsave(ap->lock, flags);
+ /*
+ * Exception might have happened after ->error_handler recovered the
+ * port but before this point. Repeat EH in such case.
+ */
+ spin_lock_irqsave(ap->lock, flags);
- if (ap->pflags & ATA_PFLAG_EH_PENDING) {
- if (--ap->eh_tries) {
- spin_unlock_irqrestore(ap->lock, flags);
- goto repeat;
- }
- ata_port_err(ap,
- "EH pending after %d tries, giving up\n",
- ATA_EH_MAX_TRIES);
- ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ if (ap->pflags & ATA_PFLAG_EH_PENDING) {
+ if (--ap->eh_tries) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ goto repeat;
}
+ ata_port_err(ap,
+ "EH pending after %d tries, giving up\n",
+ ATA_EH_MAX_TRIES);
+ ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ }
- /* this run is complete, make sure EH info is clear */
- ata_for_each_link(link, ap, HOST_FIRST)
- memset(&link->eh_info, 0, sizeof(link->eh_info));
+ /* this run is complete, make sure EH info is clear */
+ ata_for_each_link(link, ap, HOST_FIRST)
+ memset(&link->eh_info, 0, sizeof(link->eh_info));
- /* end eh (clear host_eh_scheduled) while holding
- * ap->lock such that if exception occurs after this
- * point but before EH completion, SCSI midlayer will
- * re-initiate EH.
- */
- ap->ops->end_eh(ap);
+ /*
+ * end eh (clear host_eh_scheduled) while holding ap->lock such that if
+ * exception occurs after this point but before EH completion, SCSI
+ * midlayer will re-initiate EH.
+ */
+ ap->ops->end_eh(ap);
- spin_unlock_irqrestore(ap->lock, flags);
- ata_eh_release(ap);
- } else {
- WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
- ap->ops->eng_timeout(ap);
- }
+ spin_unlock_irqrestore(ap->lock, flags);
+ ata_eh_release(ap);
scsi_eh_flush_done_q(&ap->eh_done_q);
/* clean up */
spin_lock_irqsave(ap->lock, flags);
+ ap->pflags &= ~ATA_PFLAG_RESUMING;
+
if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
@@ -912,8 +922,6 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- WARN_ON(!ap->ops->error_handler);
-
qc->flags |= ATA_QCFLAG_EH;
ata_eh_set_pending(ap, 1);
@@ -934,8 +942,6 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
*/
void ata_std_sched_eh(struct ata_port *ap)
{
- WARN_ON(!ap->ops->error_handler);
-
if (ap->pflags & ATA_PFLAG_INITIALIZING)
return;
@@ -989,8 +995,6 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
struct ata_queued_cmd *qc;
int tag, nr_aborted = 0;
- WARN_ON(!ap->ops->error_handler);
-
/* we're gonna abort all commands, no need for fast drain */
ata_eh_set_pending(ap, 0);
@@ -1065,8 +1069,6 @@ EXPORT_SYMBOL_GPL(ata_port_abort);
*/
static void __ata_port_freeze(struct ata_port *ap)
{
- WARN_ON(!ap->ops->error_handler);
-
if (ap->ops->freeze)
ap->ops->freeze(ap);
@@ -1091,8 +1093,6 @@ static void __ata_port_freeze(struct ata_port *ap)
*/
int ata_port_freeze(struct ata_port *ap)
{
- WARN_ON(!ap->ops->error_handler);
-
__ata_port_freeze(ap);
return ata_port_abort(ap);
@@ -1112,9 +1112,6 @@ void ata_eh_freeze_port(struct ata_port *ap)
{
unsigned long flags;
- if (!ap->ops->error_handler)
- return;
-
spin_lock_irqsave(ap->lock, flags);
__ata_port_freeze(ap);
spin_unlock_irqrestore(ap->lock, flags);
@@ -1134,9 +1131,6 @@ void ata_eh_thaw_port(struct ata_port *ap)
{
unsigned long flags;
- if (!ap->ops->error_handler)
- return;
-
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~ATA_PFLAG_FROZEN;
@@ -1244,6 +1238,13 @@ void ata_eh_detach_dev(struct ata_device *dev)
struct ata_eh_context *ehc = &link->eh_context;
unsigned long flags;
+ /*
+ * If the device is still enabled, transition it to standby power mode
+ * (i.e. spin down HDDs).
+ */
+ if (ata_dev_enabled(dev))
+ ata_dev_power_set_standby(dev);
+
ata_dev_disable(dev);
spin_lock_irqsave(ap->lock, flags);
@@ -2331,7 +2332,7 @@ static void ata_eh_link_report(struct ata_link *link)
struct ata_eh_context *ehc = &link->eh_context;
struct ata_queued_cmd *qc;
const char *frozen, *desc;
- char tries_buf[6] = "";
+ char tries_buf[16] = "";
int tag, nr_failed = 0;
if (ehc->i.flags & ATA_EHI_QUIET)
@@ -2575,7 +2576,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
/*
* Prepare to reset
*/
- while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
+ while (ata_eh_reset_timeouts[max_tries] != UINT_MAX)
max_tries++;
if (link->flags & ATA_LFLAG_RST_ONCE)
max_tries = 1;
@@ -2822,23 +2823,13 @@ int ata_eh_reset(struct ata_link *link, int classify,
}
}
- /*
- * Some controllers can't be frozen very well and may set spurious
- * error conditions during reset. Clear accumulated error
- * information and re-thaw the port if frozen. As reset is the
- * final recovery action and we cross check link onlineness against
- * device classification later, no hotplug event is lost by this.
- */
+ /* clear cached SError */
spin_lock_irqsave(link->ap->lock, flags);
- memset(&link->eh_info, 0, sizeof(link->eh_info));
+ link->eh_info.serror = 0;
if (slave)
- memset(&slave->eh_info, 0, sizeof(link->eh_info));
- ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ slave->eh_info.serror = 0;
spin_unlock_irqrestore(link->ap->lock, flags);
- if (ata_port_is_frozen(ap))
- ata_eh_thaw_port(ap);
-
/*
* Make sure onlineness and classification result correspond.
* Hotplug could have happened during reset and some
@@ -3052,6 +3043,15 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
if (ehc->i.flags & ATA_EHI_DID_RESET)
readid_flags |= ATA_READID_POSTRESET;
+ /*
+ * When resuming, before executing any command, make sure to
+ * transition the device to the active power mode.
+ */
+ if ((action & ATA_EH_SET_ACTIVE) && ata_dev_enabled(dev)) {
+ ata_dev_power_set_active(dev);
+ ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
+ }
+
if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
WARN_ON(dev->class == ATA_DEV_PMP);
@@ -4025,6 +4025,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
unsigned long flags;
int rc = 0;
struct ata_device *dev;
+ struct ata_link *link;
/* are we suspending? */
spin_lock_irqsave(ap->lock, flags);
@@ -4037,6 +4038,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+ /* Set all devices attached to the port in standby mode */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ENABLED)
+ ata_dev_power_set_standby(dev);
+ }
+
/*
* If we have a ZPODD attached, check its zero
* power ready status before the port is frozen.
@@ -4119,6 +4126,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
/* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
+ ap->pflags |= ATA_PFLAG_RESUMING;
spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
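
For illustration only (this sketch is not part of the diff): reading the resume wake-up across the libata-eh.c hunks above, ata_eh_handle_port_resume() sets ATA_PFLAG_RESUMING, the EH entry loop converts that flag into a per-device ATA_EH_SET_ACTIVE action, ata_eh_revalidate_and_attach() issues the wake-up before any other command, and the flag is cleared when the EH run completes. The condensed sketch below omits locking and error handling; example_resume_wakeup_flow() is a hypothetical helper used only here.

#include <linux/libata.h>
#include "libata.h"	/* drivers/ata internal header */

/* Illustration only: condensed view of the resume wake-up ordering. */
static void example_resume_wakeup_flow(struct ata_port *ap,
				       struct ata_link *link,
				       struct ata_device *dev)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* 1) Port resume marks the port as resuming. */
	ap->pflags |= ATA_PFLAG_RESUMING;

	/* 2) EH entry turns the port flag into a device action. */
	if (ap->pflags & ATA_PFLAG_RESUMING)
		ehc->i.dev_action[dev->devno] |= ATA_EH_SET_ACTIVE;

	/* 3) Revalidation consumes the action before any other command. */
	if ((ehc->i.dev_action[dev->devno] & ATA_EH_SET_ACTIVE) &&
	    ata_dev_enabled(dev)) {
		ata_dev_power_set_active(dev);
		ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
	}

	/* 4) End of the EH run clears the port flag. */
	ap->pflags &= ~ATA_PFLAG_RESUMING;
}
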
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 85e279a12f62..a701e1538482 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -19,11 +19,11 @@
#include "libata-transport.h"
/* debounce timing parameters in msecs { interval, duration, timeout } */
-const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
+const unsigned int sata_deb_timing_normal[] = { 5, 100, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
-const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
+const unsigned int sata_deb_timing_hotplug[] = { 25, 500, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
-const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
+const unsigned int sata_deb_timing_long[] = { 100, 2000, 5000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
/**
@@ -232,11 +232,11 @@ EXPORT_SYMBOL_GPL(ata_tf_from_fis);
* RETURNS:
* 0 on success, -errno on failure.
*/
-int sata_link_debounce(struct ata_link *link, const unsigned long *params,
+int sata_link_debounce(struct ata_link *link, const unsigned int *params,
unsigned long deadline)
{
- unsigned long interval = params[0];
- unsigned long duration = params[1];
+ unsigned int interval = params[0];
+ unsigned int duration = params[1];
unsigned long last_jiffies, t;
u32 last, cur;
int rc;
@@ -295,7 +295,7 @@ EXPORT_SYMBOL_GPL(sata_link_debounce);
* RETURNS:
* 0 on success, -errno on failure.
*/
-int sata_link_resume(struct ata_link *link, const unsigned long *params,
+int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline)
{
int tries = ATA_LINK_RESUME_TRIES;
@@ -396,10 +396,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER_WITH_PARTIAL:
case ATA_LPM_MIN_POWER:
- if (ata_link_nr_enabled(link) > 0)
- /* no restrictions on LPM transitions */
+ if (ata_link_nr_enabled(link) > 0) {
+ /* assume no restrictions on LPM transitions */
scontrol &= ~(0x7 << 8);
- else {
+
+ /*
+ * If the controller does not support partial, slumber,
+ * or devsleep, then disallow these transitions.
+ */
+ if (link->ap->host->flags & ATA_HOST_NO_PART)
+ scontrol |= (0x1 << 8);
+
+ if (link->ap->host->flags & ATA_HOST_NO_SSC)
+ scontrol |= (0x2 << 8);
+
+ if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
+ scontrol |= (0x4 << 8);
+ } else {
/* empty port, power off */
scontrol &= ~0xf;
scontrol |= (0x1 << 2);
@@ -528,7 +541,7 @@ EXPORT_SYMBOL_GPL(sata_set_spd);
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
+int sata_link_hardreset(struct ata_link *link, const unsigned int *timing,
unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *))
{
@@ -1139,92 +1152,12 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host,
ap->flags |= port_info->flags;
ap->ops = port_info->port_ops;
ap->cbl = ATA_CBL_SATA;
+ ap->print_id = atomic_inc_return(&ata_print_id);
return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
-/**
- * ata_sas_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_sas_port_start(struct ata_port *ap)
-{
- /*
- * the port is marked as frozen at allocation time, but if we don't
- * have new eh, we won't thaw it
- */
- if (!ap->ops->error_handler)
- ap->pflags &= ~ATA_PFLAG_FROZEN;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_start);
-
-/**
- * ata_sas_port_stop - Undo ata_sas_port_start()
- * @ap: Port to shut down
- *
- * May be used as the port_stop() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-
-void ata_sas_port_stop(struct ata_port *ap)
-{
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_stop);
-
-/**
- * ata_sas_async_probe - simply schedule probing and return
- * @ap: Port to probe
- *
- * For batch scheduling of probe for sas attached ata devices, assumes
- * the port has already been through ata_sas_port_init()
- */
-void ata_sas_async_probe(struct ata_port *ap)
-{
- __ata_port_probe(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_async_probe);
-
-int ata_sas_sync_probe(struct ata_port *ap)
-{
- return ata_port_probe(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
-
-
-/**
- * ata_sas_port_init - Initialize a SATA device
- * @ap: SATA port to initialize
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * Zero on success, non-zero on error.
- */
-
-int ata_sas_port_init(struct ata_port *ap)
-{
- int rc = ap->ops->port_start(ap);
-
- if (rc)
- return rc;
- ap->print_id = atomic_inc_return(&ata_print_id);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_init);
-
int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
{
return ata_tport_add(parent, ap);
@@ -1238,20 +1171,6 @@ void ata_sas_tport_delete(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
/**
- * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
- * @ap: SATA port to destroy
- *
- */
-
-void ata_sas_port_destroy(struct ata_port *ap)
-{
- if (ap->ops->port_stop)
- ap->ops->port_stop(ap);
- kfree(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
-
-/**
* ata_sas_slave_configure - Default slave_config routine for libata devices
* @sdev: SCSI device to configure
* @ap: ATA port to which SCSI device is attached
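
For illustration only (this sketch is not part of the diff): in the sata_link_scr_lpm() hunk above, bits 10:8 of SControl form the IPM field, where each bit that is set disallows one low-power transition (bit 8: Partial, bit 9: Slumber, bit 10: DevSleep). The sketch shows how the disallow mask composes from the host flags checked in that hunk; example_lpm_ipm_mask() is a hypothetical helper used only here.

#include <linux/libata.h>

/*
 * Illustration only: build the SControl IPM "disallowed transitions" bits
 * from the host capability flags checked in sata_link_scr_lpm().
 */
static u32 example_lpm_ipm_mask(unsigned long host_flags)
{
	u32 ipm = 0;

	if (host_flags & ATA_HOST_NO_PART)	/* no Partial */
		ipm |= 0x1 << 8;
	if (host_flags & ATA_HOST_NO_SSC)	/* no Slumber */
		ipm |= 0x2 << 8;
	if (host_flags & ATA_HOST_NO_DEVSLP)	/* no DevSleep */
		ipm |= 0x4 << 8;

	/* The caller clears SControl bits 10:8 first, then ORs this in. */
	return ipm;
}
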
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c6ece32de8e3..a371b497035e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -135,11 +135,11 @@ static ssize_t ata_scsi_park_store(struct device *device,
struct scsi_device *sdev = to_scsi_device(device);
struct ata_port *ap;
struct ata_device *dev;
- long int input;
+ int input;
unsigned long flags;
int rc;
- rc = kstrtol(buf, 10, &input);
+ rc = kstrtoint(buf, 10, &input);
if (rc)
return rc;
if (input < -2)
@@ -710,47 +710,6 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
}
/**
- * ata_dump_status - user friendly display of error info
- * @ap: the port in question
- * @tf: ptr to filled out taskfile
- *
- * Decode and dump the ATA error/status registers for the user so
- * that they have some idea what really happened at the non
- * make-believe layer.
- *
- * LOCKING:
- * inherited from caller
- */
-static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
-{
- u8 stat = tf->status, err = tf->error;
-
- if (stat & ATA_BUSY) {
- ata_port_warn(ap, "status=0x%02x {Busy} ", stat);
- } else {
- ata_port_warn(ap, "status=0x%02x { %s%s%s%s%s%s%s} ", stat,
- stat & ATA_DRDY ? "DriveReady " : "",
- stat & ATA_DF ? "DeviceFault " : "",
- stat & ATA_DSC ? "SeekComplete " : "",
- stat & ATA_DRQ ? "DataRequest " : "",
- stat & ATA_CORR ? "CorrectedError " : "",
- stat & ATA_SENSE ? "Sense " : "",
- stat & ATA_ERR ? "Error " : "");
- if (err)
- ata_port_warn(ap, "error=0x%02x {%s%s%s%s%s%s", err,
- err & ATA_ABORTED ?
- "DriveStatusError " : "",
- err & ATA_ICRC ?
- (err & ATA_ABORTED ?
- "BadCRC " : "Sector ") : "",
- err & ATA_UNC ? "UncorrectableError " : "",
- err & ATA_IDNF ? "SectorIdNotFound " : "",
- err & ATA_TRK0NF ? "TrackZeroNotFound " : "",
- err & ATA_AMNF ? "AddrMarkNotFound " : "");
- }
-}
-
-/**
* ata_to_sense_error - convert ATA error to SCSI error
* @id: ATA device number
* @drv_stat: value contained in ATA status register
@@ -758,7 +717,6 @@ static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
* @sk: the sense key we'll fill out
* @asc: the additional sense code we'll fill out
* @ascq: the additional sense code qualifier we'll fill out
- * @verbose: be verbose
*
* Converts an ATA error into a SCSI error. Fill out pointers to
* SK, ASC, and ASCQ bytes for later use in fixed or descriptor
@@ -768,7 +726,7 @@ static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
* spin_lock_irqsave(host lock)
*/
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
- u8 *asc, u8 *ascq, int verbose)
+ u8 *asc, u8 *ascq)
{
int i;
@@ -847,7 +805,7 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
*sk = sense_table[i][1];
*asc = sense_table[i][2];
*ascq = sense_table[i][3];
- goto translate_done;
+ return;
}
}
}
@@ -862,7 +820,7 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
*sk = stat_table[i][1];
*asc = stat_table[i][2];
*ascq = stat_table[i][3];
- goto translate_done;
+ return;
}
}
@@ -873,12 +831,6 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
*sk = ABORTED_COMMAND;
*asc = 0x00;
*ascq = 0x00;
-
- translate_done:
- if (verbose)
- pr_err("ata%u: translated ATA stat/err 0x%02x/%02x to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
- id, drv_stat, drv_err, *sk, *asc, *ascq);
- return;
}
/*
@@ -904,7 +856,6 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
unsigned char *desc = sb + 8;
- int verbose = qc->ap->ops->error_handler == NULL;
u8 sense_key, asc, ascq;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
@@ -916,7 +867,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
- &sense_key, &asc, &ascq, verbose);
+ &sense_key, &asc, &ascq);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
/*
@@ -999,7 +950,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
- int verbose = qc->ap->ops->error_handler == NULL;
u64 block;
u8 sense_key, asc, ascq;
@@ -1017,7 +967,7 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
- &sense_key, &asc, &ascq, verbose);
+ &sense_key, &asc, &ascq);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
} else {
/* Could not decode error */
@@ -1100,14 +1050,13 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
}
} else {
sdev->sector_size = ata_id_logical_sector_size(dev->id);
+
/*
- * Stop the drive on suspend but do not issue START STOP UNIT
- * on resume as this is not necessary and may fail: the device
- * will be woken up by ata_port_pm_resume() with a port reset
- * and device revalidation.
+ * Ask the sd driver to issue START STOP UNIT on runtime suspend
+ * and resume only. For system-level suspend/resume, the devices'
+ * power state is handled directly by libata EH.
*/
- sdev->manage_start_stop = 1;
- sdev->no_start_on_resume = 1;
+ sdev->manage_runtime_start_stop = true;
}
/*
@@ -1140,6 +1089,42 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
}
/**
+ * ata_scsi_slave_alloc - Early setup of SCSI device
+ * @sdev: SCSI device to examine
+ *
+ * This is called from scsi_alloc_sdev() when the scsi device
+ * associated with an ATA device is scanned on a port.
+ *
+ * LOCKING:
+ * Defined by SCSI layer. We don't really care.
+ */
+
+int ata_scsi_slave_alloc(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct device_link *link;
+
+ ata_scsi_sdev_config(sdev);
+
+ /*
+ * Create a link from the ata_port device to the scsi device to ensure
+ * that PM does suspend/resume in the correct order: the scsi device is
+ * consumer (child) and the ata port the supplier (parent).
+ */
+ link = device_link_add(&sdev->sdev_gendev, &ap->tdev,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ ata_port_err(ap, "Failed to create link to scsi device %s\n",
+ dev_name(&sdev->sdev_gendev));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
+
+/**
* ata_scsi_slave_config - Set SCSI device attributes
* @sdev: SCSI device to examine
*
@@ -1155,14 +1140,11 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
- int rc = 0;
-
- ata_scsi_sdev_config(sdev);
if (dev)
- rc = ata_scsi_dev_config(sdev, dev);
+ return ata_scsi_dev_config(sdev, dev);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
@@ -1186,8 +1168,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
unsigned long flags;
struct ata_device *dev;
- if (!ap->ops->error_handler)
- return;
+ device_link_remove(&sdev->sdev_gendev, &ap->tdev);
spin_lock_irqsave(ap->lock, flags);
dev = __ata_scsi_find_dev(ap, sdev);
@@ -1248,7 +1229,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
}
if (cdb[4] & 0x1) {
- tf->nsect = 1; /* 1 sector, lba=0 */
+ tf->nsect = 1; /* 1 sector, lba=0 */
if (qc->dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
@@ -1264,7 +1245,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
tf->lbah = 0x0; /* cyl high */
}
- tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
+ tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
} else {
/* Some odd clown BIOSen issue spindown on power off (ACPI S4
* or S5) causing some drives to spin up and down again.
@@ -1274,7 +1255,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
goto skip;
if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
- system_entering_hibernation())
+ system_entering_hibernation())
goto skip;
/* Issue ATA STANDBY IMMEDIATE command */
@@ -1675,7 +1656,6 @@ static void ata_qc_done(struct ata_queued_cmd *qc)
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
- struct ata_port *ap = qc->ap;
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
int need_sense = (qc->err_mask != 0) &&
@@ -1699,9 +1679,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
/* Keep the SCSI ML and status byte, clear host byte. */
cmd->result &= 0x0000ffff;
- if (need_sense && !ap->ops->error_handler)
- ata_dump_status(ap, &qc->result_tf);
-
ata_qc_done(qc);
}
@@ -1892,6 +1869,9 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
hdr[2] = 0x7; /* claim SPC-5 version compatibility */
}
+ if (args->dev->flags & ATA_DFLAG_CDL)
+ hdr[2] = 0xd; /* claim SPC-6 version compatibility */
+
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
@@ -2608,71 +2588,6 @@ static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
return 0;
}
-static void atapi_sense_complete(struct ata_queued_cmd *qc)
-{
- if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
- /* FIXME: not quite right; we don't want the
- * translation of taskfile registers into
- * a sense descriptors, since that's only
- * correct for ATA, not ATAPI
- */
- ata_gen_passthru_sense(qc);
- }
-
- ata_qc_done(qc);
-}
-
-/* is it pointless to prefer PIO for "safety reasons"? */
-static inline int ata_pio_use_silly(struct ata_port *ap)
-{
- return (ap->flags & ATA_FLAG_PIO_DMA);
-}
-
-static void atapi_request_sense(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scsi_cmnd *cmd = qc->scsicmd;
-
- memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-
-#ifdef CONFIG_ATA_SFF
- if (ap->ops->sff_tf_read)
- ap->ops->sff_tf_read(ap, &qc->tf);
-#endif
-
- /* fill these in, for the case where they are -not- overwritten */
- cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = qc->tf.error >> 4;
-
- ata_qc_reinit(qc);
-
- /* setup sg table and init transfer direction */
- sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
- ata_sg_init(qc, &qc->sgent, 1);
- qc->dma_dir = DMA_FROM_DEVICE;
-
- memset(&qc->cdb, 0, qc->dev->cdb_len);
- qc->cdb[0] = REQUEST_SENSE;
- qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
-
- qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
- qc->tf.command = ATA_CMD_PACKET;
-
- if (ata_pio_use_silly(ap)) {
- qc->tf.protocol = ATAPI_PROT_DMA;
- qc->tf.feature |= ATAPI_PKT_DMA;
- } else {
- qc->tf.protocol = ATAPI_PROT_PIO;
- qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
- qc->tf.lbah = 0;
- }
- qc->nbytes = SCSI_SENSE_BUFFERSIZE;
-
- qc->complete_fn = atapi_sense_complete;
-
- ata_qc_issue(qc);
-}
-
/*
* ATAPI devices typically report zero for their SCSI version, and sometimes
* deviate from the spec WRT response data format. If SCSI version is
@@ -2698,9 +2613,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
struct scsi_cmnd *cmd = qc->scsicmd;
unsigned int err_mask = qc->err_mask;
- /* handle completion from new EH */
- if (unlikely(qc->ap->ops->error_handler &&
- (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
+ /* handle completion from EH */
+ if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) {
if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
/* FIXME: not quite right; we don't want the
@@ -2732,23 +2646,10 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
return;
}
- /* successful completion or old EH failure path */
- if (unlikely(err_mask & AC_ERR_DEV)) {
- cmd->result = SAM_STAT_CHECK_CONDITION;
- atapi_request_sense(qc);
- return;
- } else if (unlikely(err_mask)) {
- /* FIXME: not quite right; we don't want the
- * translation of taskfile registers into
- * a sense descriptors, since that's only
- * correct for ATA, not ATAPI
- */
- ata_gen_passthru_sense(qc);
- } else {
- if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
- atapi_fixup_inquiry(cmd);
- cmd->result = SAM_STAT_GOOD;
- }
+ /* successful completion path */
+ if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
+ atapi_fixup_inquiry(cmd);
+ cmd->result = SAM_STAT_GOOD;
ata_qc_done(qc);
}
@@ -4448,7 +4349,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case MAINTENANCE_IN:
- if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
else
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
@@ -4797,9 +4698,6 @@ int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned long flags;
int devno, rc = 0;
- if (!ap->ops->error_handler)
- return -EOPNOTSUPP;
-
if (lun != SCAN_WILD_CARD && lun)
return -EINVAL;
@@ -4861,7 +4759,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
- bool delay_rescan = false;
+ int ret = 0;
mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
@@ -4870,37 +4768,34 @@ void ata_scsi_dev_rescan(struct work_struct *work)
ata_for_each_dev(dev, link, ENABLED) {
struct scsi_device *sdev = dev->sdev;
+ /*
+ * If the port was suspended before this was scheduled,
+ * bail out.
+ */
+ if (ap->pflags & ATA_PFLAG_SUSPENDED)
+ goto unlock;
+
if (!sdev)
continue;
if (scsi_device_get(sdev))
continue;
- /*
- * If the rescan work was scheduled because of a resume
- * event, the port is already fully resumed, but the
- * SCSI device may not yet be fully resumed. In such
- * case, executing scsi_rescan_device() may cause a
- * deadlock with the PM code on device_lock(). Prevent
- * this by giving up and retrying rescan after a short
- * delay.
- */
- delay_rescan = sdev->sdev_gendev.power.is_suspended;
- if (delay_rescan) {
- scsi_device_put(sdev);
- break;
- }
-
spin_unlock_irqrestore(ap->lock, flags);
- scsi_rescan_device(&(sdev->sdev_gendev));
+ ret = scsi_rescan_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
+
+ if (ret)
+ goto unlock;
}
}
+unlock:
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
- if (delay_rescan)
+ /* Reschedule with a delay if scsi_rescan_device() returned an error */
+ if (ret)
schedule_delayed_work(&ap->scsi_rescan_task,
msecs_to_jiffies(5));
}
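
For illustration only (this sketch is not part of the diff): the MAINTENANCE IN fix above compares only CDB byte 1 bits 4:0, which carry the SERVICE ACTION in SPC; the upper bits are reserved and must be ignored before comparing against MI_REPORT_SUPPORTED_OPERATION_CODES. example_maint_in_service_action() is a hypothetical helper used only here.

#include <linux/types.h>

/*
 * Illustration only: MAINTENANCE IN carries its SERVICE ACTION in CDB
 * byte 1, bits 4:0; masking with 0x1f ignores the reserved upper bits.
 */
static inline u8 example_maint_in_service_action(const u8 *cdb)
{
	return cdb[1] & 0x1f;
}
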
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 9d28badfe41d..8fcc622fcb3d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -883,31 +883,21 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
struct ata_port *ap = qc->ap;
- if (ap->ops->error_handler) {
- if (in_wq) {
- /* EH might have kicked in while host lock is
- * released.
- */
- qc = ata_qc_from_tag(ap, qc->tag);
- if (qc) {
- if (likely(!(qc->err_mask & AC_ERR_HSM))) {
- ata_sff_irq_on(ap);
- ata_qc_complete(qc);
- } else
- ata_port_freeze(ap);
- }
- } else {
- if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ if (in_wq) {
+ /* EH might have kicked in while host lock is released. */
+ qc = ata_qc_from_tag(ap, qc->tag);
+ if (qc) {
+ if (likely(!(qc->err_mask & AC_ERR_HSM))) {
+ ata_sff_irq_on(ap);
ata_qc_complete(qc);
- else
+ } else
ata_port_freeze(ap);
}
} else {
- if (in_wq) {
- ata_sff_irq_on(ap);
- ata_qc_complete(qc);
- } else
+ if (likely(!(qc->err_mask & AC_ERR_HSM)))
ata_qc_complete(qc);
+ else
+ ata_port_freeze(ap);
}
}
@@ -1971,7 +1961,7 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_eh_context *ehc = &link->eh_context;
- const unsigned long *timing = sata_ehc_deb_timing(ehc);
+ const unsigned int *timing = sata_ehc_deb_timing(ehc);
bool online;
int rc;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e4fb9d1b9b39..3e49a877500e 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap)
put_device(dev);
}
+static const struct device_type ata_port_sas_type = {
+ .name = ATA_PORT_TYPE_NAME,
+};
+
/** ata_tport_add - initialize a transport ATA port structure
*
* @parent: parent device
@@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
- dev->type = &ata_port_type;
+ if (ap->flags & ATA_FLAG_SAS_HOST)
+ dev->type = &ata_port_sas_type;
+ else
+ dev->type = &ata_port_type;
dev->parent = parent;
ata_host_get(ap->host);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index cf993885d2b2..05ac80da8ebc 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -30,6 +30,8 @@ enum {
ATA_DNXFER_QUIET = (1 << 31),
};
+#define ATA_PORT_TYPE_NAME "ata_port"
+
extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;
@@ -60,6 +62,8 @@ extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
unsigned int readid_flags);
extern int ata_dev_configure(struct ata_device *dev);
+extern void ata_dev_power_set_standby(struct ata_device *dev);
+extern void ata_dev_power_set_active(struct ata_device *dev);
extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern unsigned int ata_dev_set_feature(struct ata_device *dev,
@@ -78,8 +82,6 @@ extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern struct ata_port *ata_port_alloc(struct ata_host *host);
extern const char *sata_spd_string(unsigned int spd);
-extern int ata_port_probe(struct ata_port *ap);
-extern void __ata_port_probe(struct ata_port *ap);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
@@ -124,7 +126,6 @@ extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
extern void ata_scsi_dev_rescan(struct work_struct *work);
-extern int ata_bus_probe(struct ata_port *ap);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 314eaa167954..d0c6924d25b6 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -917,15 +917,13 @@ static int arasan_cf_probe(struct platform_device *pdev)
return ret;
}
-static int arasan_cf_remove(struct platform_device *pdev)
+static void arasan_cf_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
ata_host_detach(host);
cf_exit(acdev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -966,7 +964,7 @@ MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
- .remove = arasan_cf_remove,
+ .remove_new = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &arasan_cf_pm_ops,
diff --git a/drivers/ata/pata_buddha.c b/drivers/ata/pata_buddha.c
index 49bc619b83e2..c36ee991d5e5 100644
--- a/drivers/ata/pata_buddha.c
+++ b/drivers/ata/pata_buddha.c
@@ -27,7 +27,6 @@
#include <asm/amigahw.h>
#include <asm/amigaints.h>
-#include <asm/ide.h>
#include <asm/setup.h>
#define DRV_NAME "pata_buddha"
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index c6e043e05d43..c84a20892f1b 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -40,6 +40,7 @@
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
@@ -910,6 +911,12 @@ static struct ata_port_operations ep93xx_pata_port_ops = {
.port_start = ep93xx_pata_port_start,
};
+static const struct soc_device_attribute ep93xx_soc_table[] = {
+ { .revision = "E1", .data = (void *)ATA_UDMA3 },
+ { .revision = "E2", .data = (void *)ATA_UDMA4 },
+ { /* sentinel */ }
+};
+
static int ep93xx_pata_probe(struct platform_device *pdev)
{
struct ep93xx_pata_data *drv_data;
@@ -939,7 +946,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data) {
- err = -ENXIO;
+ err = -ENOMEM;
goto err_rel_gpio;
}
@@ -952,7 +959,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
- err = -ENXIO;
+ err = -ENOMEM;
goto err_rel_dma;
}
@@ -976,12 +983,11 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
* so this driver supports only UDMA modes.
*/
if (drv_data->dma_rx_channel && drv_data->dma_tx_channel) {
- int chip_rev = ep93xx_chip_revision();
+ const struct soc_device_attribute *match;
- if (chip_rev == EP93XX_CHIP_REV_E1)
- ap->udma_mask = ATA_UDMA3;
- else if (chip_rev == EP93XX_CHIP_REV_E2)
- ap->udma_mask = ATA_UDMA4;
+ match = soc_device_match(ep93xx_soc_table);
+ if (match)
+ ap->udma_mask = (unsigned int) match->data;
else
ap->udma_mask = ATA_UDMA2;
}
@@ -1004,7 +1010,7 @@ err_rel_gpio:
return err;
}
-static int ep93xx_pata_remove(struct platform_device *pdev)
+static void ep93xx_pata_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ep93xx_pata_data *drv_data = host->private_data;
@@ -1013,7 +1019,6 @@ static int ep93xx_pata_remove(struct platform_device *pdev)
ep93xx_pata_release_dma(drv_data);
ep93xx_pata_clear_regs(drv_data->ide_base);
ep93xx_ide_release_gpio(pdev);
- return 0;
}
static struct platform_driver ep93xx_pata_platform_driver = {
@@ -1021,7 +1026,7 @@ static struct platform_driver ep93xx_pata_platform_driver = {
.name = DRV_NAME,
},
.probe = ep93xx_pata_probe,
- .remove = ep93xx_pata_remove,
+ .remove_new = ep93xx_pata_remove,
};
module_platform_driver(ep93xx_pata_platform_driver);
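
The ep93xx change above replaces a private chip-revision query with a soc_device_match() table whose ->data carries the per-revision UDMA mask. A small sketch of the same lookup pattern, with illustrative revision strings and values rather than the ep93xx ones:

#include <linux/sys_soc.h>
#include <linux/types.h>

/* Placeholder table: revision strings and values are examples only. */
static const struct soc_device_attribute example_soc_table[] = {
        { .revision = "E1", .data = (void *)3 },
        { .revision = "E2", .data = (void *)4 },
        { /* sentinel */ }
};

static unsigned int example_pick_value(unsigned int fallback)
{
        const struct soc_device_attribute *match;

        match = soc_device_match(example_soc_table);
        if (match)
                return (unsigned int)(uintptr_t)match->data;

        return fallback;
}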
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 996516e64f13..0c2ae430f5aa 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -28,11 +28,15 @@
#include <asm/atarihw.h>
#include <asm/atariints.h>
#include <asm/atari_stdma.h>
-#include <asm/ide.h>
#define DRV_NAME "pata_falcon"
#define DRV_VERSION "0.1.0"
+static int pata_falcon_swap_mask;
+
+module_param_named(data_swab, pata_falcon_swap_mask, int, 0444);
+MODULE_PARM_DESC(data_swab, "Data byte swap enable/disable bitmap (0x1==drive1, 0x2==drive2, 0x4==drive3, 0x8==drive4, default==0)");
+
static const struct scsi_host_template pata_falcon_sht = {
ATA_PIO_SHT(DRV_NAME),
};
@@ -50,7 +54,7 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
if (dev->class == ATA_DEV_ATA && cmd &&
!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
- swap = 0;
+ swap = (uintptr_t)ap->private_data & BIT(dev->devno);
/* Transfer multiple of 2 bytes */
if (rw == READ) {
@@ -123,8 +127,9 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
struct resource *base_res, *ctl_res, *irq_res;
struct ata_host *host;
struct ata_port *ap;
- void __iomem *base;
- int irq = 0;
+ void __iomem *base, *ctl_base;
+ int mask_shift = 0; /* Q40 & Falcon default */
+ int irq = 0, io_offset = 1, reg_shift = 2; /* Falcon defaults */
dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n");
@@ -165,26 +170,38 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
- base = (void __iomem *)base_mem_res->start;
/* N.B. this assumes data_addr will be used for word-sized I/O only */
- ap->ioaddr.data_addr = base + 0 + 0 * 4;
- ap->ioaddr.error_addr = base + 1 + 1 * 4;
- ap->ioaddr.feature_addr = base + 1 + 1 * 4;
- ap->ioaddr.nsect_addr = base + 1 + 2 * 4;
- ap->ioaddr.lbal_addr = base + 1 + 3 * 4;
- ap->ioaddr.lbam_addr = base + 1 + 4 * 4;
- ap->ioaddr.lbah_addr = base + 1 + 5 * 4;
- ap->ioaddr.device_addr = base + 1 + 6 * 4;
- ap->ioaddr.status_addr = base + 1 + 7 * 4;
- ap->ioaddr.command_addr = base + 1 + 7 * 4;
-
- base = (void __iomem *)ctl_mem_res->start;
- ap->ioaddr.altstatus_addr = base + 1;
- ap->ioaddr.ctl_addr = base + 1;
-
- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
- (unsigned long)base_mem_res->start,
- (unsigned long)ctl_mem_res->start);
+ ap->ioaddr.data_addr = (void __iomem *)base_mem_res->start;
+
+ if (base_res) { /* only Q40 has IO resources */
+ io_offset = 0x10000;
+ reg_shift = 0;
+ base = (void __iomem *)base_res->start;
+ ctl_base = (void __iomem *)ctl_res->start;
+ } else {
+ base = (void __iomem *)base_mem_res->start;
+ ctl_base = (void __iomem *)ctl_mem_res->start;
+ }
+
+ ap->ioaddr.error_addr = base + io_offset + (1 << reg_shift);
+ ap->ioaddr.feature_addr = base + io_offset + (1 << reg_shift);
+ ap->ioaddr.nsect_addr = base + io_offset + (2 << reg_shift);
+ ap->ioaddr.lbal_addr = base + io_offset + (3 << reg_shift);
+ ap->ioaddr.lbam_addr = base + io_offset + (4 << reg_shift);
+ ap->ioaddr.lbah_addr = base + io_offset + (5 << reg_shift);
+ ap->ioaddr.device_addr = base + io_offset + (6 << reg_shift);
+ ap->ioaddr.status_addr = base + io_offset + (7 << reg_shift);
+ ap->ioaddr.command_addr = base + io_offset + (7 << reg_shift);
+
+ ap->ioaddr.altstatus_addr = ctl_base + io_offset;
+ ap->ioaddr.ctl_addr = ctl_base + io_offset;
+
+ ata_port_desc(ap, "cmd %px ctl %px data %px",
+ base, ctl_base, ap->ioaddr.data_addr);
+
+ if (pdev->id > 0)
+ mask_shift = 2;
+ ap->private_data = (void *)(uintptr_t)(pata_falcon_swap_mask >> mask_shift);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_res && irq_res->start > 0) {
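
The pata_falcon hunk above packs the data_swab module parameter, shifted per controller instance, into the pointer-sized ap->private_data field and later tests one bit per drive in the data transfer path. A sketch of that encoding with hypothetical helper names; the driver itself open-codes the same arithmetic:

#include <linux/bits.h>
#include <linux/libata.h>
#include <linux/types.h>

static void example_set_swap_mask(struct ata_port *ap, int mask)
{
        ap->private_data = (void *)(uintptr_t)mask;
}

static bool example_swap_needed(struct ata_port *ap, unsigned int devno)
{
        return (uintptr_t)ap->private_data & BIT(devno);
}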
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 6f6734c09b11..4d6ef90ccc77 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -14,8 +14,7 @@
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/bitops.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/clk.h>
#include "sata_gemini.h"
@@ -470,11 +469,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- ftide->base = devm_ioremap_resource(dev, res);
+ ftide->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ftide->base))
return PTR_ERR(ftide->base);
@@ -541,15 +536,13 @@ err_dis_clk:
return ret;
}
-static int pata_ftide010_remove(struct platform_device *pdev)
+static void pata_ftide010_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ftide010 *ftide = host->private_data;
ata_host_detach(ftide->host);
clk_disable_unprepare(ftide->pclk);
-
- return 0;
}
static const struct of_device_id pata_ftide010_of_match[] = {
@@ -563,10 +556,11 @@ static struct platform_driver pata_ftide010_driver = {
.of_match_table = pata_ftide010_of_match,
},
.probe = pata_ftide010_probe,
- .remove = pata_ftide010_remove,
+ .remove_new = pata_ftide010_remove,
};
module_platform_driver(pata_ftide010_driver);
+MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c
index e5aa07f92106..3bdbe2b65a2b 100644
--- a/drivers/ata/pata_gayle.c
+++ b/drivers/ata/pata_gayle.c
@@ -27,7 +27,6 @@
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/amigayle.h>
-#include <asm/ide.h>
#include <asm/setup.h>
#define DRV_NAME "pata_gayle"
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 4013f28679a9..d0aa8fc929b4 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -141,21 +141,15 @@ static int pata_imx_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->clk = devm_clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "Failed to get clock\n");
+ dev_err(&pdev->dev, "Failed to get and enable clock\n");
return PTR_ERR(priv->clk);
}
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
host = ata_host_alloc(&pdev->dev, 1);
- if (!host) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!host)
+ return -ENOMEM;
host->private_data = priv;
ap = host->ports[0];
@@ -164,12 +158,9 @@ static int pata_imx_probe(struct platform_device *pdev)
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
- io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->host_regs = devm_ioremap_resource(&pdev->dev, io_res);
- if (IS_ERR(priv->host_regs)) {
- ret = PTR_ERR(priv->host_regs);
- goto err;
- }
+ priv->host_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &io_res);
+ if (IS_ERR(priv->host_regs))
+ return PTR_ERR(priv->host_regs);
ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
@@ -195,16 +186,12 @@ static int pata_imx_probe(struct platform_device *pdev)
&pata_imx_sht);
if (ret)
- goto err;
+ return ret;
return 0;
-err:
- clk_disable_unprepare(priv->clk);
-
- return ret;
}
-static int pata_imx_remove(struct platform_device *pdev)
+static void pata_imx_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct pata_imx_priv *priv = host->private_data;
@@ -212,10 +199,6 @@ static int pata_imx_remove(struct platform_device *pdev)
ata_host_detach(host);
__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
-
- clk_disable_unprepare(priv->clk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -266,7 +249,7 @@ MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
- .remove = pata_imx_remove,
+ .remove_new = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
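
The pata_imx conversion above switches to devm_clk_get_enabled(), which gets and enables the clock in one managed step, so the probe error paths and the remove callback no longer need clk_disable_unprepare(). A short sketch of that shape, assuming a hypothetical wrapper:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_enable_clock(struct platform_device *pdev,
                                struct clk **clk)
{
        /* Disabled and unprepared automatically when the device unbinds. */
        *clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(*clk)) {
                dev_err(&pdev->dev, "Failed to get and enable clock\n");
                return PTR_ERR(*clk);
        }

        return 0;
}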
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index b1daa4d3fcd9..246bb4f8f1f7 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -242,12 +242,6 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
int ret;
int irq;
- cmd = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- if (!cmd || !ctl)
- return -EINVAL;
-
ixpp = devm_kzalloc(dev, sizeof(*ixpp), GFP_KERNEL);
if (!ixpp)
return -ENOMEM;
@@ -271,18 +265,18 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
if (ret)
return ret;
- ixpp->cmd = devm_ioremap_resource(dev, cmd);
- ixpp->ctl = devm_ioremap_resource(dev, ctl);
- if (IS_ERR(ixpp->cmd) || IS_ERR(ixpp->ctl))
- return -ENOMEM;
+ ixpp->cmd = devm_platform_get_and_ioremap_resource(pdev, 0, &cmd);
+ if (IS_ERR(ixpp->cmd))
+ return PTR_ERR(ixpp->cmd);
+
+ ixpp->ctl = devm_platform_get_and_ioremap_resource(pdev, 1, &ctl);
+ if (IS_ERR(ixpp->ctl))
+ return PTR_ERR(ixpp->ctl);
irq = platform_get_irq(pdev, 0);
- if (irq > 0)
- irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
- else if (irq < 0)
+ if (irq < 0)
return irq;
- else
- return -EINVAL;
+ irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
/* Just one port to set up */
ixp4xx_setup_port(ixpp->host->ports[0], ixpp, cmd->start, ctl->start);
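
The ixp4xx IRQ hunk above relies on platform_get_irq() returning a negative errno on failure and never 0, so only the negative case needs handling before configuring the trigger type. A sketch of the simplified lookup, with an assumed function name:

#include <linux/irq.h>
#include <linux/platform_device.h>

static int example_get_edge_irq(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;     /* platform_get_irq() never returns 0 */

        irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
        return irq;
}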
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 66c9dea4ea6e..6c317a461a1f 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -19,9 +19,10 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/libata.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
@@ -800,8 +801,7 @@ static int mpc52xx_ata_probe(struct platform_device *op)
return rv;
}
-static int
-mpc52xx_ata_remove(struct platform_device *op)
+static void mpc52xx_ata_remove(struct platform_device *op)
{
struct ata_host *host = platform_get_drvdata(op);
struct mpc52xx_ata_priv *priv = host->private_data;
@@ -815,8 +815,6 @@ mpc52xx_ata_remove(struct platform_device *op)
irq_dispose_mapping(task_irq);
bcom_ata_release(priv->dmatsk);
irq_dispose_mapping(priv->ata_irq);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -857,7 +855,7 @@ static const struct of_device_id mpc52xx_ata_of_match[] = {
static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
- .remove = mpc52xx_ata_remove,
+ .remove_new = mpc52xx_ata_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = mpc52xx_ata_suspend,
.resume = mpc52xx_ata_resume,
diff --git a/drivers/ata/pata_parport/comm.c b/drivers/ata/pata_parport/comm.c
index 4839becbbd56..94b8d352102e 100644
--- a/drivers/ata/pata_parport/comm.c
+++ b/drivers/ata/pata_parport/comm.c
@@ -37,7 +37,7 @@ static int comm_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int l, h, r;
- r = regr + cont_map[cont];
+ r = regr + cont_map[cont];
switch (pi->mode) {
case 0:
@@ -90,7 +90,6 @@ static void comm_connect(struct pi_adapter *pi)
}
static void comm_disconnect(struct pi_adapter *pi)
-
{
w2(0); w2(0); w2(0); w2(4);
w0(pi->saved_r0);
@@ -172,12 +171,12 @@ static void comm_write_block(struct pi_adapter *pi, char *buf, int count)
w4l(swab16(((u16 *)buf)[2 * k]) |
swab16(((u16 *)buf)[2 * k + 1]) << 16);
break;
- }
+ }
}
static void comm_log_adapter(struct pi_adapter *pi)
-
-{ char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
+{
+ char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
dev_info(&pi->dev,
"DataStor Commuter at 0x%x, mode %d (%s), delay %d\n",
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index ea402e02c46e..5275c6464f57 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -295,7 +295,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
return ret;
}
-static int pxa_ata_remove(struct platform_device *pdev)
+static void pxa_ata_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct pata_pxa_data *data = host->ports[0]->private_data;
@@ -303,13 +303,11 @@ static int pxa_ata_remove(struct platform_device *pdev)
dma_release_channel(data->dma_chan);
ata_host_detach(host);
-
- return 0;
}
static struct platform_driver pxa_ata_driver = {
.probe = pxa_ata_probe,
- .remove = pxa_ata_remove,
+ .remove_new = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 3974d294a341..0fa253ad7c93 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -155,18 +155,16 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return 0;
}
-static int rb532_pata_driver_remove(struct platform_device *pdev)
+static void rb532_pata_driver_remove(struct platform_device *pdev)
{
struct ata_host *ah = platform_get_drvdata(pdev);
ata_host_detach(ah);
-
- return 0;
}
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
- .remove = rb532_pata_driver_remove,
+ .remove_new = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 3b62ea482f1a..93882e976ede 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -180,8 +180,7 @@ static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
* document.
*
* This function is also called to turn off DMA when a timeout occurs
- * during DMA operation. In both cases we need to reset the engine,
- * so no actual eng_timeout handler is required.
+ * during DMA operation. In both cases we need to reset the engine.
*
* We assume bmdma_stop is always called if bmdma_start was called. If
* not then we may need to wrap qc_issue.
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index fabdd1e380f9..52f5168e4db5 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -18,9 +18,8 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
@@ -1211,7 +1210,7 @@ error_out:
return err;
}
-static int sata_dwc_remove(struct platform_device *ofdev)
+static void sata_dwc_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
@@ -1227,7 +1226,6 @@ static int sata_dwc_remove(struct platform_device *ofdev)
#endif
dev_dbg(dev, "done\n");
- return 0;
}
static const struct of_device_id sata_dwc_match[] = {
@@ -1242,7 +1240,7 @@ static struct platform_driver sata_dwc_driver = {
.of_match_table = sata_dwc_match,
},
.probe = sata_dwc_probe,
- .remove = sata_dwc_remove,
+ .remove_new = sata_dwc_remove,
};
module_platform_driver(sata_dwc_driver);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index ccd99b9aa9ff..01aa05f4c3f5 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -12,6 +12,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -19,9 +22,6 @@
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
static unsigned int intr_coalescing_count;
module_param(intr_coalescing_count, int, S_IRUGO);
@@ -1526,7 +1526,7 @@ error_exit_with_cleanup:
return retval;
}
-static int sata_fsl_remove(struct platform_device *ofdev)
+static void sata_fsl_remove(struct platform_device *ofdev)
{
struct ata_host *host = platform_get_drvdata(ofdev);
struct sata_fsl_host_priv *host_priv = host->private_data;
@@ -1535,8 +1535,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
device_remove_file(&ofdev->dev, &host_priv->rx_watermark);
ata_host_detach(host);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1591,7 +1589,7 @@ static struct platform_driver fsl_sata_driver = {
.of_match_table = fsl_sata_match,
},
.probe = sata_fsl_probe,
- .remove = sata_fsl_remove,
+ .remove_new = sata_fsl_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = sata_fsl_suspend,
.resume = sata_fsl_resume,
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index c42cc9bbbc4e..400b22ee99c3 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -12,8 +12,7 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/reset.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
@@ -400,7 +399,7 @@ out_unprep_clk:
return ret;
}
-static int gemini_sata_remove(struct platform_device *pdev)
+static void gemini_sata_remove(struct platform_device *pdev)
{
struct sata_gemini *sg = platform_get_drvdata(pdev);
@@ -409,8 +408,6 @@ static int gemini_sata_remove(struct platform_device *pdev)
clk_unprepare(sg->sata0_pclk);
}
sg_singleton = NULL;
-
- return 0;
}
static const struct of_device_id gemini_sata_of_match[] = {
@@ -424,10 +421,11 @@ static struct platform_driver gemini_sata_driver = {
.of_match_table = gemini_sata_of_match,
},
.probe = gemini_sata_probe,
- .remove = gemini_sata_remove,
+ .remove_new = gemini_sata_remove,
};
module_platform_driver(gemini_sata_driver);
+MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d6b324d03e59..63ef7bb073ce 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
@@ -385,7 +385,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
- static const unsigned long timing[] = { 5, 100, 500};
+ static const unsigned int timing[] = { 5, 100, 500};
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 2c8c78ed86c1..db9c255dc9f2 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -619,7 +619,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
struct ata_port *ap = link->ap;
void __iomem *port_base = inic_port_base(ap);
void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
int rc;
/* hammer it into sane state */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index d404e631d152..45e48d653c60 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1255,8 +1255,8 @@ static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
for (b = 0; b < bytes; ) {
for (w = 0, o = 0; b < bytes && w < 4; w++) {
- o += snprintf(linebuf + o, sizeof(linebuf) - o,
- "%08x ", readl(start + b));
+ o += scnprintf(linebuf + o, sizeof(linebuf) - o,
+ "%08x ", readl(start + b));
b += sizeof(u32);
}
dev_dbg(dev, "%s: %p: %s\n",
@@ -3633,7 +3633,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
/* Workaround for errata FEr SATA#10 (part 2) */
do {
- const unsigned long *timing =
+ const unsigned int *timing =
sata_ehc_deb_timing(&link->eh_context);
rc = sata_link_hardreset(link, timing, deadline + extra,
@@ -4210,7 +4210,7 @@ err:
* A platform bus SATA device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
-static int mv_platform_remove(struct platform_device *pdev)
+static void mv_platform_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct mv_host_priv *hpriv = host->private_data;
@@ -4228,7 +4228,6 @@ static int mv_platform_remove(struct platform_device *pdev)
}
phy_power_off(hpriv->port_phys[port]);
}
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -4284,7 +4283,7 @@ MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
- .remove = mv_platform_remove,
+ .remove_new = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
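
The mv_dump_mem() hunk above swaps snprintf() for scnprintf() when building the line buffer: scnprintf() returns the number of characters actually written, so the accumulated offset can never run past the buffer, while snprintf() returns the would-be length and can overshoot on truncation. A small sketch of the accumulation pattern, with illustrative names:

#include <linux/kernel.h>
#include <linux/types.h>

static void example_format_words(char *buf, size_t len,
                                 const u32 *words, int count)
{
        int i, o = 0;

        for (i = 0; i < count; i++)
                o += scnprintf(buf + o, len - o, "%08x ", words[i]);
}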
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index abf5651c87ab..0a0cee755bde 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1529,7 +1529,7 @@ static int nv_hardreset(struct ata_link *link, unsigned int *class,
sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
NULL, NULL);
else {
- const unsigned long *timing = sata_ehc_deb_timing(ehc);
+ const unsigned int *timing = sata_ehc_deb_timing(ehc);
int rc;
if (!(ehc->i.flags & ATA_EHI_QUIET))
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 34790f15c1b8..c1469d076880 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/ata.h>
#include <linux/libata.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
@@ -861,15 +861,11 @@ static int sata_rcar_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ata_host *host;
struct sata_rcar_priv *priv;
- struct resource *mem;
- int irq;
- int ret = 0;
+ int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- if (!irq)
- return -EINVAL;
priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
if (!priv)
@@ -890,8 +886,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
host->private_data = priv;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, mem);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto err_pm_put;
@@ -914,7 +909,7 @@ err_pm_put:
return ret;
}
-static int sata_rcar_remove(struct platform_device *pdev)
+static void sata_rcar_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct sata_rcar_priv *priv = host->private_data;
@@ -930,8 +925,6 @@ static int sata_rcar_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1016,7 +1009,7 @@ static const struct dev_pm_ops sata_rcar_pm_ops = {
static struct platform_driver sata_rcar_driver = {
.probe = sata_rcar_probe,
- .remove = sata_rcar_remove,
+ .remove_new = sata_rcar_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sata_rcar_match,
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index e72a0257990d..142e70bfc498 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -597,7 +597,7 @@ static int sil24_init_port(struct ata_port *ap)
static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
const struct ata_taskfile *tf,
int is_cmd, u32 ctrl,
- unsigned long timeout_msec)
+ unsigned int timeout_msec)
{
void __iomem *port = sil24_port_base(ap);
struct sil24_port_priv *pp = ap->private_data;
@@ -651,7 +651,7 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
int pmp = sata_srst_pmp(link);
- unsigned long timeout_msec = 0;
+ unsigned int timeout_msec = 0;
struct ata_taskfile tf;
const char *reason;
int rc;
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index ccc016072637..b51d7a9d0d90 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -232,7 +232,6 @@ static const struct scsi_host_template pdc_sata_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
};
-/* TODO: inherit from base port_ops after converting to new EH */
static struct ata_port_operations pdc_20621_ops = {
.inherits = &ata_sff_port_ops,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b7d7f410c256..4d8b315c48a1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -3537,6 +3537,8 @@ int device_add(struct device *dev)
/* subsystems can specify simple device enumeration */
else if (dev->bus && dev->bus->dev_name)
error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
+ else
+ error = -EINVAL;
if (error)
goto name_error;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index db716ffd083e..3db88bbcae0f 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
if (!rbnode)
return -ENOMEM;
regcache_rbtree_set_register(map, rbnode,
- reg - rbnode->base_reg, value);
+ (reg - rbnode->base_reg) / map->reg_stride,
+ value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
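
The regcache-rbtree fix above stores the new value at a per-register index rather than a raw address offset, so the distance from the block's base register has to be divided by the map's reg_stride. A purely illustrative helper, not regmap internals:

static unsigned int example_block_index(unsigned int reg,
                                        unsigned int base_reg,
                                        unsigned int reg_stride)
{
        /* e.g. stride 4: reg 0x10 in a block based at 0x00 is index 4 */
        return (reg - base_reg) / reg_stride;
}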
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 79ab532aabaf..6bc86106c7b2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1557,7 +1557,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
do {
int sent;
- bvec_set_page(&bvec, page, offset, len);
+ bvec_set_page(&bvec, page, len, offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
sent = sock_sendmsg(socket, &msg);
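
The drbd hunk above fixes a swapped argument pair: bvec_set_page() takes the length before the offset. A one-line sketch of the corrected call, wrapped in an assumed helper:

#include <linux/bvec.h>

static void example_fill_bvec(struct bio_vec *bv, struct page *page,
                              unsigned int len, unsigned int offset)
{
        bvec_set_page(bv, page, len, offset);   /* len first, then offset */
}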
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index df1cd0f718b8..800f131222fc 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1436,8 +1436,9 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
{
- blk_mark_disk_dead(nbd->disk);
nbd_clear_sock(nbd);
+ disk_force_media_change(nbd->disk);
+ nbd_bdev_reset(nbd);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 864013019d6b..968090935eb2 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1643,9 +1643,12 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
struct nullb_queue *nq = hctx->driver_data;
LIST_HEAD(list);
int nr = 0;
+ struct request *rq;
spin_lock(&nq->poll_lock);
list_splice_init(&nq->poll_list, &list);
+ list_for_each_entry(rq, &list, queuelist)
+ blk_mq_set_request_complete(rq);
spin_unlock(&nq->poll_lock);
while (!list_empty(&list)) {
@@ -1671,16 +1674,21 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
- pr_info("rq %p timed out\n", rq);
-
if (hctx->type == HCTX_TYPE_POLL) {
struct nullb_queue *nq = hctx->driver_data;
spin_lock(&nq->poll_lock);
+ /* The request may have completed meanwhile. */
+ if (blk_mq_request_completed(rq)) {
+ spin_unlock(&nq->poll_lock);
+ return BLK_EH_DONE;
+ }
list_del_init(&rq->queuelist);
spin_unlock(&nq->poll_lock);
}
+ pr_info("rq %p timed out\n", rq);
+
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources
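
The null_blk hunks above close a race between the poll path and the timeout handler: requests pulled off the poll list are marked complete while the poll lock is still held, and the timeout handler re-checks that state under the same lock before touching the request. A sketch of the two sides under stated assumptions; the locking granularity and return values are illustrative, not the driver's exact behaviour:

#include <linux/blk-mq.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static void example_poll_grab(spinlock_t *lock, struct list_head *poll_list,
                              struct list_head *local)
{
        struct request *rq;

        spin_lock(lock);
        list_splice_init(poll_list, local);
        list_for_each_entry(rq, local, queuelist)
                blk_mq_set_request_complete(rq);        /* claim before unlock */
        spin_unlock(lock);
}

static enum blk_eh_timer_return example_timeout(spinlock_t *lock,
                                                struct request *rq)
{
        spin_lock(lock);
        if (blk_mq_request_completed(rq)) {             /* poll side won */
                spin_unlock(lock);
                return BLK_EH_DONE;
        }
        list_del_init(&rq->queuelist);
        spin_unlock(lock);

        return BLK_EH_RESET_TIMER;      /* illustrative: give it more time */
}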
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2328cc05be36..a999b698b131 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -632,9 +632,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
-static int rbd_dev_header_info(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -995,15 +994,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev)
RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
+static void rbd_image_header_cleanup(struct rbd_image_header *header)
+{
+ kfree(header->object_prefix);
+ ceph_put_snap_context(header->snapc);
+ kfree(header->snap_sizes);
+ kfree(header->snap_names);
+
+ memset(header, 0, sizeof(*header));
+}
+
/*
* Fill an rbd image header with information from the given format 1
* on-disk header.
*/
-static int rbd_header_from_disk(struct rbd_device *rbd_dev,
- struct rbd_image_header_ondisk *ondisk)
+static int rbd_header_from_disk(struct rbd_image_header *header,
+ struct rbd_image_header_ondisk *ondisk,
+ bool first_time)
{
- struct rbd_image_header *header = &rbd_dev->header;
- bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
@@ -1070,11 +1078,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
- rbd_init_layout(rbd_dev);
- } else {
- ceph_put_snap_context(header->snapc);
- kfree(header->snap_names);
- kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
@@ -4859,7 +4862,9 @@ out_req:
* return, the rbd_dev->header field will contain up-to-date
* information about the image.
*/
-static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
@@ -4907,7 +4912,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
- ret = rbd_header_from_disk(rbd_dev, ondisk);
+ ret = rbd_header_from_disk(header, ondisk, first_time);
out:
kfree(ondisk);
@@ -4931,39 +4936,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
}
}
-static int rbd_dev_refresh(struct rbd_device *rbd_dev)
-{
- u64 mapping_size;
- int ret;
-
- down_write(&rbd_dev->header_rwsem);
- mapping_size = rbd_dev->mapping.size;
-
- ret = rbd_dev_header_info(rbd_dev);
- if (ret)
- goto out;
-
- /*
- * If there is a parent, see if it has disappeared due to the
- * mapped image getting flattened.
- */
- if (rbd_dev->parent) {
- ret = rbd_dev_v2_parent_info(rbd_dev);
- if (ret)
- goto out;
- }
-
- rbd_assert(!rbd_is_snap(rbd_dev));
- rbd_dev->mapping.size = rbd_dev->header.image_size;
-
-out:
- up_write(&rbd_dev->header_rwsem);
- if (!ret && mapping_size != rbd_dev->mapping.size)
- rbd_dev_update_size(rbd_dev);
-
- return ret;
-}
-
static const struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
};
@@ -5503,17 +5475,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
-static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
-{
- return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
- &rbd_dev->header.obj_order,
- &rbd_dev->header.image_size);
-}
-
-static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
+ char **pobject_prefix)
{
size_t size;
void *reply_buf;
+ char *object_prefix;
int ret;
void *p;
@@ -5531,16 +5498,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
goto out;
p = reply_buf;
- rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
- p + ret, NULL, GFP_NOIO);
+ object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
+ GFP_NOIO);
+ if (IS_ERR(object_prefix)) {
+ ret = PTR_ERR(object_prefix);
+ goto out;
+ }
ret = 0;
- if (IS_ERR(rbd_dev->header.object_prefix)) {
- ret = PTR_ERR(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- } else {
- dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
- }
+ *pobject_prefix = object_prefix;
+ dout(" object_prefix = %s\n", object_prefix);
out:
kfree(reply_buf);
@@ -5591,13 +5558,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
-static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
-{
- return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
- rbd_is_ro(rbd_dev),
- &rbd_dev->header.features);
-}
-
/*
* These are generic image flags, but since they are used only for
* object map, store them in rbd_dev->object_map_flags.
@@ -5634,6 +5594,14 @@ struct parent_image_info {
u64 overlap;
};
+static void rbd_parent_info_cleanup(struct parent_image_info *pii)
+{
+ kfree(pii->pool_ns);
+ kfree(pii->image_id);
+
+ memset(pii, 0, sizeof(*pii));
+}
+
/*
* The caller is responsible for @pii.
*/
@@ -5703,6 +5671,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev,
if (pii->has_overlap)
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+ pii->has_overlap, pii->overlap);
return 0;
e_inval:
@@ -5741,14 +5712,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
pii->has_overlap = true;
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+ pii->has_overlap, pii->overlap);
return 0;
e_inval:
return -EINVAL;
}
-static int get_parent_info(struct rbd_device *rbd_dev,
- struct parent_image_info *pii)
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
+ struct parent_image_info *pii)
{
struct page *req_page, *reply_page;
void *p;
@@ -5776,7 +5750,7 @@ static int get_parent_info(struct rbd_device *rbd_dev,
return ret;
}
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
struct parent_image_info pii = { 0 };
@@ -5786,37 +5760,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
if (!parent_spec)
return -ENOMEM;
- ret = get_parent_info(rbd_dev, &pii);
+ ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out_err;
- dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
- __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
- pii.has_overlap, pii.overlap);
-
- if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
- /*
- * Either the parent never existed, or we have
- * record of it but the image got flattened so it no
- * longer has a parent. When the parent of a
- * layered image disappears we immediately set the
- * overlap to 0. The effect of this is that all new
- * requests will be treated as if the image had no
- * parent.
- *
- * If !pii.has_overlap, the parent image spec is not
- * applicable. It's there to avoid duplication in each
- * snapshot record.
- */
- if (rbd_dev->parent_overlap) {
- rbd_dev->parent_overlap = 0;
- rbd_dev_parent_put(rbd_dev);
- pr_info("%s: clone image has been flattened\n",
- rbd_dev->disk->disk_name);
- }
-
+ if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
goto out; /* No parent? No problem. */
- }
/* The ceph file layout needs to fit pool id in 32 bits */
@@ -5828,58 +5777,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
/*
- * The parent won't change (except when the clone is
- * flattened, already handled that). So we only need to
- * record the parent spec we have not already done so.
+ * The parent won't change except when the clone is flattened,
+ * so we only need to record the parent image spec once.
*/
- if (!rbd_dev->parent_spec) {
- parent_spec->pool_id = pii.pool_id;
- if (pii.pool_ns && *pii.pool_ns) {
- parent_spec->pool_ns = pii.pool_ns;
- pii.pool_ns = NULL;
- }
- parent_spec->image_id = pii.image_id;
- pii.image_id = NULL;
- parent_spec->snap_id = pii.snap_id;
-
- rbd_dev->parent_spec = parent_spec;
- parent_spec = NULL; /* rbd_dev now owns this */
+ parent_spec->pool_id = pii.pool_id;
+ if (pii.pool_ns && *pii.pool_ns) {
+ parent_spec->pool_ns = pii.pool_ns;
+ pii.pool_ns = NULL;
}
+ parent_spec->image_id = pii.image_id;
+ pii.image_id = NULL;
+ parent_spec->snap_id = pii.snap_id;
+
+ rbd_assert(!rbd_dev->parent_spec);
+ rbd_dev->parent_spec = parent_spec;
+ parent_spec = NULL; /* rbd_dev now owns this */
/*
- * We always update the parent overlap. If it's zero we issue
- * a warning, as we will proceed as if there was no parent.
+ * Record the parent overlap. If it's zero, issue a warning as
+ * we will proceed as if there is no parent.
*/
- if (!pii.overlap) {
- if (parent_spec) {
- /* refresh, careful to warn just once */
- if (rbd_dev->parent_overlap)
- rbd_warn(rbd_dev,
- "clone now standalone (overlap became 0)");
- } else {
- /* initial probe */
- rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
- }
- }
+ if (!pii.overlap)
+ rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
- kfree(pii.pool_ns);
- kfree(pii.image_id);
+ rbd_parent_info_cleanup(&pii);
rbd_spec_put(parent_spec);
return ret;
}
-static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
+ u64 *stripe_unit, u64 *stripe_count)
{
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
size_t size = sizeof (striping_info_buf);
- void *p;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
@@ -5891,27 +5828,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
if (ret < size)
return -ERANGE;
- p = &striping_info_buf;
- rbd_dev->header.stripe_unit = ceph_decode_64(&p);
- rbd_dev->header.stripe_count = ceph_decode_64(&p);
+ *stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
+ *stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
+ dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
+ *stripe_count);
+
return 0;
}
-static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
{
- __le64 data_pool_id;
+ __le64 data_pool_buf;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_data_pool",
- NULL, 0, &data_pool_id, sizeof(data_pool_id));
+ NULL, 0, &data_pool_buf,
+ sizeof(data_pool_buf));
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
- if (ret < sizeof(data_pool_id))
+ if (ret < sizeof(data_pool_buf))
return -EBADMSG;
- rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
- WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
+ *data_pool_id = le64_to_cpu(data_pool_buf);
+ dout(" data_pool_id = %lld\n", *data_pool_id);
+ WARN_ON(*data_pool_id == CEPH_NOPOOL);
+
return 0;
}
@@ -6103,7 +6046,8 @@ out_err:
return ret;
}
-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
+ struct ceph_snap_context **psnapc)
{
size_t size;
int ret;
@@ -6164,9 +6108,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
- ceph_put_snap_context(rbd_dev->header.snapc);
- rbd_dev->header.snapc = snapc;
-
+ *psnapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
(unsigned long long)seq, (unsigned int)snap_count);
out:
@@ -6215,38 +6157,42 @@ out:
return snap_name;
}
-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
- bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
- ret = rbd_dev_v2_image_size(rbd_dev);
+ ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
+ first_time ? &header->obj_order : NULL,
+ &header->image_size);
if (ret)
return ret;
if (first_time) {
- ret = rbd_dev_v2_header_onetime(rbd_dev);
+ ret = rbd_dev_v2_header_onetime(rbd_dev, header);
if (ret)
return ret;
}
- ret = rbd_dev_v2_snap_context(rbd_dev);
- if (ret && first_time) {
- kfree(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- }
+ ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
-static int rbd_dev_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_assert(!header->object_prefix && !header->snapc);
if (rbd_dev->image_format == 1)
- return rbd_dev_v1_header_info(rbd_dev);
+ return rbd_dev_v1_header_info(rbd_dev, header, first_time);
- return rbd_dev_v2_header_info(rbd_dev);
+ return rbd_dev_v2_header_info(rbd_dev, header, first_time);
}
/*
@@ -6734,60 +6680,49 @@ out:
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
- struct rbd_image_header *header;
-
rbd_dev_parent_put(rbd_dev);
rbd_object_map_free(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
- header = &rbd_dev->header;
- ceph_put_snap_context(header->snapc);
- kfree(header->snap_sizes);
- kfree(header->snap_names);
- kfree(header->object_prefix);
- memset(header, 0, sizeof (*header));
+ rbd_image_header_cleanup(&rbd_dev->header);
}
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
{
int ret;
- ret = rbd_dev_v2_object_prefix(rbd_dev);
+ ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
if (ret)
- goto out_err;
+ return ret;
/*
* Get and check the features for the image. Currently the

* features are assumed to never change.
*/
- ret = rbd_dev_v2_features(rbd_dev);
+ ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
+ rbd_is_ro(rbd_dev), &header->features);
if (ret)
- goto out_err;
+ return ret;
/* If the image supports fancy striping, get its parameters */
- if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
- ret = rbd_dev_v2_striping_info(rbd_dev);
- if (ret < 0)
- goto out_err;
+ if (header->features & RBD_FEATURE_STRIPINGV2) {
+ ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
+ &header->stripe_count);
+ if (ret)
+ return ret;
}
- if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
- ret = rbd_dev_v2_data_pool(rbd_dev);
+ if (header->features & RBD_FEATURE_DATA_POOL) {
+ ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
if (ret)
- goto out_err;
+ return ret;
}
- rbd_init_layout(rbd_dev);
return 0;
-
-out_err:
- rbd_dev->header.features = 0;
- kfree(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- return ret;
}
/*
@@ -6982,13 +6917,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
if (!depth)
down_write(&rbd_dev->header_rwsem);
- ret = rbd_dev_header_info(rbd_dev);
+ ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
if (ret) {
if (ret == -ENOENT && !need_watch)
rbd_print_dne(rbd_dev, false);
goto err_out_probe;
}
+ rbd_init_layout(rbd_dev);
+
/*
* If this image is the one being mapped, we have pool name and
* id, image name and id, and snap name - need to fill snap id.
@@ -7017,7 +6954,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
}
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
- ret = rbd_dev_v2_parent_info(rbd_dev);
+ ret = rbd_dev_setup_parent(rbd_dev);
if (ret)
goto err_out_probe;
}
@@ -7043,6 +6980,107 @@ err_out_format:
return ret;
}
+static void rbd_dev_update_header(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
+{
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
+
+ if (rbd_dev->header.image_size != header->image_size) {
+ rbd_dev->header.image_size = header->image_size;
+
+ if (!rbd_is_snap(rbd_dev)) {
+ rbd_dev->mapping.size = header->image_size;
+ rbd_dev_update_size(rbd_dev);
+ }
+ }
+
+ ceph_put_snap_context(rbd_dev->header.snapc);
+ rbd_dev->header.snapc = header->snapc;
+ header->snapc = NULL;
+
+ if (rbd_dev->image_format == 1) {
+ kfree(rbd_dev->header.snap_names);
+ rbd_dev->header.snap_names = header->snap_names;
+ header->snap_names = NULL;
+
+ kfree(rbd_dev->header.snap_sizes);
+ rbd_dev->header.snap_sizes = header->snap_sizes;
+ header->snap_sizes = NULL;
+ }
+}
+
+static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
+ struct parent_image_info *pii)
+{
+ if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
+ /*
+ * Either the parent never existed, or we have
+ * record of it but the image got flattened so it no
+ * longer has a parent. When the parent of a
+ * layered image disappears we immediately set the
+ * overlap to 0. The effect of this is that all new
+ * requests will be treated as if the image had no
+ * parent.
+ *
+ * If !pii.has_overlap, the parent image spec is not
+ * applicable. It's there to avoid duplication in each
+ * snapshot record.
+ */
+ if (rbd_dev->parent_overlap) {
+ rbd_dev->parent_overlap = 0;
+ rbd_dev_parent_put(rbd_dev);
+ pr_info("%s: clone has been flattened\n",
+ rbd_dev->disk->disk_name);
+ }
+ } else {
+ rbd_assert(rbd_dev->parent_spec);
+
+ /*
+ * Update the parent overlap. If it became zero, issue
+ * a warning as we will proceed as if there is no parent.
+ */
+ if (!pii->overlap && rbd_dev->parent_overlap)
+ rbd_warn(rbd_dev,
+ "clone has become standalone (overlap 0)");
+ rbd_dev->parent_overlap = pii->overlap;
+ }
+}
+
+static int rbd_dev_refresh(struct rbd_device *rbd_dev)
+{
+ struct rbd_image_header header = { 0 };
+ struct parent_image_info pii = { 0 };
+ int ret;
+
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+ ret = rbd_dev_header_info(rbd_dev, &header, false);
+ if (ret)
+ goto out;
+
+ /*
+ * If there is a parent, see if it has disappeared due to the
+ * mapped image getting flattened.
+ */
+ if (rbd_dev->parent) {
+ ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
+ if (ret)
+ goto out;
+ }
+
+ down_write(&rbd_dev->header_rwsem);
+ rbd_dev_update_header(rbd_dev, &header);
+ if (rbd_dev->parent)
+ rbd_dev_update_parent(rbd_dev, &pii);
+ up_write(&rbd_dev->header_rwsem);
+
+out:
+ rbd_parent_info_cleanup(&pii);
+ rbd_image_header_cleanup(&header);
+ return ret;
+}
+
static ssize_t do_rbd_add(const char *buf, size_t count)
{
struct rbd_device *rbd_dev = NULL;
@@ -7199,7 +7237,6 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
static ssize_t do_rbd_remove(const char *buf, size_t count)
{
struct rbd_device *rbd_dev = NULL;
- struct list_head *tmp;
int dev_id;
char opt_buf[6];
bool force = false;
@@ -7226,8 +7263,7 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
ret = -ENOENT;
spin_lock(&rbd_dev_list_lock);
- list_for_each(tmp, &rbd_dev_list) {
- rbd_dev = list_entry(tmp, struct rbd_device, node);
+ list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
if (rbd_dev->dev_id == dev_id) {
ret = 0;
break;
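
The rbd refresh rework above moves to a stage-then-swap scheme: the new header and parent info are fetched into local structures without holding header_rwsem, and the write lock is taken only to transfer the fields (and ownership of their allocations) into the live rbd_dev. A generic sketch of that pattern with made-up types, not the rbd ones:

#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_hdr {
        char *name;
        u64 size;
};

static void example_apply(struct rw_semaphore *sem,
                          struct example_hdr *live, struct example_hdr *fresh)
{
        down_write(sem);
        live->size = fresh->size;
        kfree(live->name);
        live->name = fresh->name;       /* live structure takes ownership */
        fresh->name = NULL;             /* so cleanup won't double-free */
        up_write(sem);
}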
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 82597ab4f747..499f4809fcdf 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -4419,6 +4419,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
+ hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
hdev->cmd_timeout = btusb_qca_cmd_timeout;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index eb4e7bee1e20..d57bc066dce6 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -38,6 +38,7 @@ enum sysc_soc {
SOC_2420,
SOC_2430,
SOC_3430,
+ SOC_AM35,
SOC_3630,
SOC_4430,
SOC_4460,
@@ -1097,6 +1098,11 @@ static int sysc_enable_module(struct device *dev)
if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
best_mode = SYSC_IDLE_NO;
+
+ /* Clear WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg &= ~BIT(regbits->enwkup_shift);
} else {
best_mode = fls(ddata->cfg.sidlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
@@ -1224,6 +1230,13 @@ set_sidle:
}
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
+ /* Set WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg |= BIT(regbits->enwkup_shift);
+ }
+
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
if (regbits->autoidle_shift >= 0 &&
@@ -1518,16 +1531,16 @@ struct sysc_revision_quirk {
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
@@ -1862,7 +1875,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
__func__, val, irq_mask);
- if (sysc_soc->soc == SOC_3430) {
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
/* Clear DSS_SDI_CONTROL */
sysc_write(ddata, 0x44, 0);
@@ -2150,8 +2163,7 @@ static int sysc_reset(struct sysc *ddata)
}
if (ddata->cfg.srst_udelay)
- usleep_range(ddata->cfg.srst_udelay,
- ddata->cfg.srst_udelay * 2);
+ fsleep(ddata->cfg.srst_udelay);
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
@@ -3025,6 +3037,7 @@ static void ti_sysc_idle(struct work_struct *work)
static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("OMAP242*", SOC_2420),
SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("AM35*", SOC_AM35),
SOC_FLAG("OMAP3[45]*", SOC_3430),
SOC_FLAG("OMAP3[67]*", SOC_3630),
SOC_FLAG("OMAP443*", SOC_4430),
@@ -3229,7 +3242,7 @@ static int sysc_check_active_timer(struct sysc *ddata)
* can be dropped if we stop supporting old beagleboard revisions
* A to B4 at some point.
*/
- if (sysc_soc->soc == SOC_3430)
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
error = -ENXIO;
else
error = -EBUSY;
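
The sysc_reset() hunk above replaces the hand-rolled usleep_range(x, 2 * x) with fsleep(), which picks an appropriate delay primitive from the requested duration in microseconds. A trivial sketch, assuming a field name like the one in the diff:

#include <linux/delay.h>

static void example_post_reset_delay(unsigned long srst_udelay)
{
        if (srst_udelay)
                fsleep(srst_udelay);    /* duration in microseconds */
}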
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
new file mode 100644
index 000000000000..a57677f908f3
--- /dev/null
+++ b/drivers/cache/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Cache Drivers"
+
+config AX45MP_L2_CACHE
+ bool "Andes Technology AX45MP L2 Cache controller"
+ depends on RISCV_DMA_NONCOHERENT
+ select RISCV_NONSTANDARD_CACHE_OPS
+ help
+ Support for the L2 cache controller on Andes Technology AX45MP platforms.
+
+endmenu
diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile
new file mode 100644
index 000000000000..2012e7fb978d
--- /dev/null
+++ b/drivers/cache/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
new file mode 100644
index 000000000000..57186c58dc84
--- /dev/null
+++ b/drivers/cache/ax45mp_cache.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * non-coherent cache functions for Andes AX45MP
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/cacheinfo.h>
+#include <linux/dma-direction.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <asm/dma-noncoherent.h>
+
+/* L2 cache registers */
+#define AX45MP_L2C_REG_CTL_OFFSET 0x8
+
+#define AX45MP_L2C_REG_C0_CMD_OFFSET 0x40
+#define AX45MP_L2C_REG_C0_ACC_OFFSET 0x48
+#define AX45MP_L2C_REG_STATUS_OFFSET 0x80
+
+/* D-cache operation */
+#define AX45MP_CCTL_L1D_VA_INVAL 0 /* Invalidate an L1 cache entry */
+#define AX45MP_CCTL_L1D_VA_WB 1 /* Write-back an L1 cache entry */
+
+/* L2 CCTL status */
+#define AX45MP_CCTL_L2_STATUS_IDLE 0
+
+/* L2 CCTL status cores mask */
+#define AX45MP_CCTL_L2_STATUS_C0_MASK 0xf
+
+/* L2 cache operation */
+#define AX45MP_CCTL_L2_PA_INVAL 0x8 /* Invalidate an L2 cache entry */
+#define AX45MP_CCTL_L2_PA_WB 0x9 /* Write-back an L2 cache entry */
+
+#define AX45MP_L2C_REG_PER_CORE_OFFSET 0x10
+#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET 4
+
+#define AX45MP_L2C_REG_CN_CMD_OFFSET(n) \
+ (AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
+#define AX45MP_L2C_REG_CN_ACC_OFFSET(n) \
+ (AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
+#define AX45MP_CCTL_L2_STATUS_CN_MASK(n) \
+ (AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))
+
+#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM 0x80b
+#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM 0x80c
+
+#define AX45MP_CACHE_LINE_SIZE 64
+
+struct ax45mp_priv {
+ void __iomem *l2c_base;
+ u32 ax45mp_cache_line_size;
+};
+
+static struct ax45mp_priv ax45mp_priv;
+
+/* L2 Cache operations */
+static inline uint32_t ax45mp_cpu_l2c_get_cctl_status(void)
+{
+ return readl(ax45mp_priv.l2c_base + AX45MP_L2C_REG_STATUS_OFFSET);
+}
+
+static void ax45mp_cpu_cache_operation(unsigned long start, unsigned long end,
+ unsigned int l1_op, unsigned int l2_op)
+{
+ unsigned long line_size = ax45mp_priv.ax45mp_cache_line_size;
+ void __iomem *base = ax45mp_priv.l2c_base;
+ int mhartid = smp_processor_id();
+ unsigned long pa;
+
+ while (end > start) {
+ csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
+ csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, l1_op);
+
+ pa = virt_to_phys((void *)start);
+ writel(pa, base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid));
+ writel(l2_op, base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid));
+ while ((ax45mp_cpu_l2c_get_cctl_status() &
+ AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
+ AX45MP_CCTL_L2_STATUS_IDLE)
+ ;
+
+ start += line_size;
+ }
+}
+
+/* Write-back L1 and L2 cache entry */
+static inline void ax45mp_cpu_dcache_wb_range(unsigned long start, unsigned long end)
+{
+ ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_WB,
+ AX45MP_CCTL_L2_PA_WB);
+}
+
+/* Invalidate the L1 and L2 cache entry */
+static inline void ax45mp_cpu_dcache_inval_range(unsigned long start, unsigned long end)
+{
+ ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_INVAL,
+ AX45MP_CCTL_L2_PA_INVAL);
+}
+
+static void ax45mp_dma_cache_inv(phys_addr_t paddr, size_t size)
+{
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+ unsigned long end = start + size;
+ unsigned long line_size;
+ unsigned long flags;
+
+ if (unlikely(start == end))
+ return;
+
+ line_size = ax45mp_priv.ax45mp_cache_line_size;
+
+ start = start & (~(line_size - 1));
+ end = ((end + line_size - 1) & (~(line_size - 1)));
+
+ local_irq_save(flags);
+
+ ax45mp_cpu_dcache_inval_range(start, end);
+
+ local_irq_restore(flags);
+}
+
+static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
+{
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+ unsigned long end = start + size;
+ unsigned long line_size;
+ unsigned long flags;
+
+ line_size = ax45mp_priv.ax45mp_cache_line_size;
+ start = start & (~(line_size - 1));
+ local_irq_save(flags);
+ ax45mp_cpu_dcache_wb_range(start, end);
+ local_irq_restore(flags);
+}
+
+static void ax45mp_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
+{
+ ax45mp_dma_cache_wback(paddr, size);
+ ax45mp_dma_cache_inv(paddr, size);
+}
+
+static int ax45mp_get_l2_line_size(struct device_node *np)
+{
+ int ret;
+
+ ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv.ax45mp_cache_line_size);
+ if (ret) {
+ pr_err("Failed to get cache-line-size\n");
+ return ret;
+ }
+
+ if (ax45mp_priv.ax45mp_cache_line_size != AX45MP_CACHE_LINE_SIZE) {
+ pr_err("Expected cache-line-size to be 64 bytes (found:%u)\n",
+ ax45mp_priv.ax45mp_cache_line_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct riscv_nonstd_cache_ops ax45mp_cmo_ops __initdata = {
+ .wback = &ax45mp_dma_cache_wback,
+ .inv = &ax45mp_dma_cache_inv,
+ .wback_inv = &ax45mp_dma_cache_wback_inv,
+};
+
+static const struct of_device_id ax45mp_cache_ids[] = {
+ { .compatible = "andestech,ax45mp-cache" },
+ { /* sentinel */ }
+};
+
+static int __init ax45mp_cache_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int ret;
+
+ np = of_find_matching_node(NULL, ax45mp_cache_ids);
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
+ /*
+ * If IOCP is present on the Andes AX45MP core, riscv_cbom_block_size
+ * is guaranteed to be 0, so we can rely on it. When
+ * riscv_cbom_block_size is 0 we don't need to handle CMOs in software,
+ * so just return success here; only if it is set do we continue
+ * further in the probe path.
+ */
+ if (!riscv_cbom_block_size)
+ return 0;
+
+ ax45mp_priv.l2c_base = ioremap(res.start, resource_size(&res));
+ if (!ax45mp_priv.l2c_base)
+ return -ENOMEM;
+
+ ret = ax45mp_get_l2_line_size(np);
+ if (ret) {
+ iounmap(ax45mp_priv.l2c_base);
+ return ret;
+ }
+
+ riscv_noncoherent_register_cache_ops(&ax45mp_cmo_ops);
+
+ return 0;
+}
+early_initcall(ax45mp_cache_init);
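
As a minimal, standalone sketch (plain userspace C, not part of the patch), this is the alignment arithmetic that ax45mp_dma_cache_inv() and ax45mp_dma_cache_wback() above apply before issuing the per-line L1 CSR and L2 MMIO operations: round the start down and the end up to the 64-byte line size, then walk the range one line at a time. The addresses are made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned long line_size = 64;	/* AX45MP_CACHE_LINE_SIZE */
	unsigned long start = 0x1010, size = 100;
	unsigned long end = start + size;	/* 0x1074 */
	unsigned long addr;

	start &= ~(line_size - 1);				/* rounds down to 0x1000 */
	end = (end + line_size - 1) & ~(line_size - 1);		/* rounds up to 0x1080 */

	for (addr = start; addr < end; addr += line_size)
		printf("CMO on cache line 0x%lx\n", addr);	/* 0x1000, 0x1040 */

	return 0;
}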
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 514f9f287a78..c6f181702b9a 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -394,8 +394,6 @@ find_quicksilver(struct device *dev, void *data)
static int __init
parisc_agp_init(void)
{
- extern struct sba_device *sba_list;
-
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 23f6f2eda84c..42b1062e33cd 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -33,7 +33,7 @@ const struct class tpm_class = {
.shutdown_pre = tpm_class_shutdown,
};
const struct class tpmrm_class = {
- .name = "tmprm",
+ .name = "tpmrm",
};
dev_t tpm_devt;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 9eb1a1859012..ea085b14ab7c 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -463,28 +463,6 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
}
-static int crb_check_flags(struct tpm_chip *chip)
-{
- u32 val;
- int ret;
-
- ret = crb_request_locality(chip, 0);
- if (ret)
- return ret;
-
- ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
- if (ret)
- goto release;
-
- if (val == 0x414D4400U /* AMD */)
- chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
-
-release:
- crb_relinquish_locality(chip, 0);
-
- return ret;
-}
-
static const struct tpm_class_ops tpm_crb = {
.flags = TPM_OPS_AUTO_STARTUP,
.status = crb_status,
@@ -797,12 +775,13 @@ static int crb_acpi_add(struct acpi_device *device)
FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
buf->header.length,
ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
rc = crb_map_pluton(dev, priv, buf, crb_pluton);
if (rc)
- return rc;
+ goto out;
}
priv->sm = sm;
@@ -826,9 +805,14 @@ static int crb_acpi_add(struct acpi_device *device)
if (rc)
goto out;
- rc = crb_check_flags(chip);
- if (rc)
- goto out;
+#ifdef CONFIG_X86
+ /* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ dev_info(dev, "Disabling hwrng\n");
+ chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+ }
+#endif /* CONFIG_X86 */
rc = tpm_chip_register(chip);
diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
index 4eaf1b53f06b..ef4ba467e747 100644
--- a/drivers/clk/clk-si521xx.c
+++ b/drivers/clk/clk-si521xx.c
@@ -96,7 +96,7 @@ static int si521xx_regmap_i2c_write(void *context, unsigned int reg,
unsigned int val)
{
struct i2c_client *i2c = context;
- const u8 data[3] = { reg, 1, val };
+ const u8 data[2] = { reg, val };
const int count = ARRAY_SIZE(data);
int ret;
@@ -146,7 +146,7 @@ static int si521xx_regmap_i2c_read(void *context, unsigned int reg,
static const struct regmap_config si521xx_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_NONE,
+ .cache_type = REGCACHE_FLAT,
.max_register = SI521XX_REG_DA,
.rd_table = &si521xx_readable_table,
.wr_table = &si521xx_writeable_table,
@@ -281,9 +281,10 @@ static int si521xx_probe(struct i2c_client *client)
{
const u16 chip_info = (u16)(uintptr_t)device_get_match_data(&client->dev);
const struct clk_parent_data clk_parent_data = { .index = 0 };
- struct si521xx *si;
+ const u8 data[3] = { SI521XX_REG_BC, 1, 1 };
unsigned char name[6] = "DIFF0";
struct clk_init_data init = {};
+ struct si521xx *si;
int i, ret;
if (!chip_info)
@@ -308,7 +309,7 @@ static int si521xx_probe(struct i2c_client *client)
"Failed to allocate register map\n");
/* Always read back 1 Byte via I2C */
- ret = regmap_write(si->regmap, SI521XX_REG_BC, 1);
+ ret = i2c_master_send(client, data, ARRAY_SIZE(data));
if (ret < 0)
return ret;
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 7ab2447bd203..3d7de355f8f6 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -118,21 +118,21 @@ enum vc3_div {
VC3_DIV5,
};
-enum vc3_clk_mux {
- VC3_DIFF2_MUX,
- VC3_DIFF1_MUX,
- VC3_SE3_MUX,
- VC3_SE2_MUX,
- VC3_SE1_MUX,
-};
-
enum vc3_clk {
- VC3_DIFF2,
- VC3_DIFF1,
- VC3_SE3,
- VC3_SE2,
- VC3_SE1,
VC3_REF,
+ VC3_SE1,
+ VC3_SE2,
+ VC3_SE3,
+ VC3_DIFF1,
+ VC3_DIFF2,
+};
+
+enum vc3_clk_mux {
+ VC3_SE1_MUX = VC3_SE1 - 1,
+ VC3_SE2_MUX = VC3_SE2 - 1,
+ VC3_SE3_MUX = VC3_SE3 - 1,
+ VC3_DIFF1_MUX = VC3_DIFF1 - 1,
+ VC3_DIFF2_MUX = VC3_DIFF2 - 1,
};
struct vc3_clk_data {
@@ -401,11 +401,10 @@ static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
/* Determine best fractional part, which is 16 bit wide */
div_frc = rate % *parent_rate;
div_frc *= BIT(16) - 1;
- do_div(div_frc, *parent_rate);
- vc3->div_frc = (u32)div_frc;
+ vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX);
rate = (*parent_rate *
- (vc3->div_int * VC3_2_POW_16 + div_frc) / VC3_2_POW_16);
+ (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
} else {
rate = *parent_rate * vc3->div_int;
}
@@ -897,33 +896,33 @@ static struct vc3_hw_data clk_div[] = {
};
static struct vc3_hw_data clk_mux[] = {
- [VC3_DIFF2_MUX] = {
+ [VC3_SE1_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_DIFF2_CTRL_REG,
- .bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL
+ .offs = VC3_SE1_DIV4_CTRL,
+ .bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "diff2_mux",
+ .name = "se1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV1].hw,
- &clk_div[VC3_DIV3].hw
+ &clk_div[VC3_DIV5].hw,
+ &clk_div[VC3_DIV4].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
- [VC3_DIFF1_MUX] = {
+ [VC3_SE2_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_DIFF1_CTRL_REG,
- .bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL
+ .offs = VC3_SE2_CTRL_REG0,
+ .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "diff1_mux",
+ .name = "se2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV1].hw,
- &clk_div[VC3_DIV3].hw
+ &clk_div[VC3_DIV5].hw,
+ &clk_div[VC3_DIV4].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
@@ -945,33 +944,33 @@ static struct vc3_hw_data clk_mux[] = {
.flags = CLK_SET_RATE_PARENT
}
},
- [VC3_SE2_MUX] = {
+ [VC3_DIFF1_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_SE2_CTRL_REG0,
- .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
+ .offs = VC3_DIFF1_CTRL_REG,
+ .bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "se2_mux",
+ .name = "diff1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV5].hw,
- &clk_div[VC3_DIV4].hw
+ &clk_div[VC3_DIV1].hw,
+ &clk_div[VC3_DIV3].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
- [VC3_SE1_MUX] = {
+ [VC3_DIFF2_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_SE1_DIV4_CTRL,
- .bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL
+ .offs = VC3_DIFF2_CTRL_REG,
+ .bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "se1_mux",
+ .name = "diff2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV5].hw,
- &clk_div[VC3_DIV4].hw
+ &clk_div[VC3_DIV1].hw,
+ &clk_div[VC3_DIV3].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
@@ -1110,7 +1109,7 @@ static int vc3_probe(struct i2c_client *client)
name, 0, CLK_SET_RATE_PARENT, 1, 1);
else
clk_out[i] = devm_clk_hw_register_fixed_factor_parent_hw(dev,
- name, &clk_mux[i].hw, CLK_SET_RATE_PARENT, 1, 1);
+ name, &clk_mux[i - 1].hw, CLK_SET_RATE_PARENT, 1, 1);
if (IS_ERR(clk_out[i]))
return PTR_ERR(clk_out[i]);
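
The reordering above makes enum vc3_clk start at VC3_REF and derives each mux index from its output index minus one, which is why the probe loop now indexes clk_mux[i - 1]. A standalone C sketch of that mapping, for illustration only:

#include <stdio.h>

enum vc3_clk { VC3_REF, VC3_SE1, VC3_SE2, VC3_SE3, VC3_DIFF1, VC3_DIFF2 };

enum vc3_clk_mux {
	VC3_SE1_MUX = VC3_SE1 - 1,
	VC3_SE2_MUX = VC3_SE2 - 1,
	VC3_SE3_MUX = VC3_SE3 - 1,
	VC3_DIFF1_MUX = VC3_DIFF1 - 1,
	VC3_DIFF2_MUX = VC3_DIFF2 - 1,
};

int main(void)
{
	int i;

	/* VC3_REF (index 0) has no mux; every other output uses mux i - 1 */
	for (i = VC3_SE1; i <= VC3_DIFF2; i++)
		printf("clk_out[%d] -> clk_mux[%d]\n", i, i - 1);

	return 0;
}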
diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c
index 8f4441dd572b..9384ecc6c741 100644
--- a/drivers/clk/sprd/ums512-clk.c
+++ b/drivers/clk/sprd/ums512-clk.c
@@ -800,7 +800,7 @@ static SPRD_MUX_CLK_DATA(uart1_clk, "uart1-clk", uart_parents,
0x250, 0, 3, UMS512_MUX_FLAG);
static const struct clk_parent_data thm_parents[] = {
- { .fw_name = "ext-32m" },
+ { .fw_name = "ext-32k" },
{ .hw = &clk_250k.hw },
};
static SPRD_MUX_CLK_DATA(thm0_clk, "thm0-clk", thm_parents,
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index a9f3fb448de6..7bfba0afd778 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -159,7 +159,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw,
err = tegra_bpmp_clk_transfer(clk->bpmp, &msg);
if (err < 0)
- return err;
+ return 0;
return response.rate;
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c4d671a5a13d..0ba0dc4ecf06 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -461,13 +461,6 @@ config VF_PIT_TIMER
help
Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
-config OXNAS_RPS_TIMER
- bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
- select TIMER_OF
- select CLKSRC_MMIO
- help
- This enables support for the Oxford Semiconductor OXNAS RPS timers.
-
config SYS_SUPPORTS_SH_CMT
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 5d93c9e3fc55..368c3461dab8 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -54,7 +54,6 @@ obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_MTK_CPUX_TIMER) += timer-mediatek-cpux.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += timer-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
-obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += timer-owl.o
obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index e733a2a1927a..7dd2c615bce2 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -792,6 +792,13 @@ static __always_inline void set_next_event_mem(const int access, unsigned long e
u64 cnt;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+
+ /* Timer must be disabled before programming CVAL */
+ if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ }
+
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
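
The hunk above forces the memory-mapped timer to be disabled before its compare value is reprogrammed. A standalone sketch of the resulting write order follows; read_ctrl()/write_ctrl()/write_cval() are hypothetical stand-ins for arch_timer_reg_read()/arch_timer_reg_write(), and the CVAL write after the hunk is assumed from the surrounding (not shown) function body.

#include <stdint.h>
#include <stdio.h>

#define CTRL_ENABLE	(1u << 0)
#define CTRL_IT_MASK	(1u << 1)

static uint32_t regs_ctrl;
static uint64_t regs_cval;

static uint32_t read_ctrl(void)		{ return regs_ctrl; }
static void write_ctrl(uint32_t v)	{ regs_ctrl = v; }
static void write_cval(uint64_t v)	{ regs_cval = v; }

static void set_next_event(uint64_t evt)
{
	uint32_t ctrl = read_ctrl();

	/* Disable the timer before touching CVAL, as the hunk does */
	if (ctrl & CTRL_ENABLE) {
		ctrl &= ~CTRL_ENABLE;
		write_ctrl(ctrl);
	}

	write_cval(evt);		/* program the compare value */

	ctrl |= CTRL_ENABLE;		/* then re-enable with the IRQ unmasked */
	ctrl &= ~CTRL_IT_MASK;
	write_ctrl(ctrl);
}

int main(void)
{
	regs_ctrl = CTRL_ENABLE | CTRL_IT_MASK;
	set_next_event(12345);
	printf("ctrl=%#x cval=%llu\n", regs_ctrl, (unsigned long long)regs_cval);
	return 0;
}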
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index e56307a81f4d..8ff7cd4e20bb 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -390,7 +390,7 @@ static __always_inline u64 read_hv_clock_msr(void)
static union {
struct ms_hyperv_tsc_page page;
u8 reserved[PAGE_SIZE];
-} tsc_pg __aligned(PAGE_SIZE);
+} tsc_pg __bss_decrypted __aligned(PAGE_SIZE);
static struct ms_hyperv_tsc_page *tsc_page = &tsc_pg.page;
static unsigned long tsc_pfn;
diff --git a/drivers/clocksource/timer-loongson1-pwm.c b/drivers/clocksource/timer-loongson1-pwm.c
index 6335fee03017..244d66835508 100644
--- a/drivers/clocksource/timer-loongson1-pwm.c
+++ b/drivers/clocksource/timer-loongson1-pwm.c
@@ -28,7 +28,7 @@
#define CNTR_WIDTH 24
-DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
+static DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
struct ls1x_clocksource {
void __iomem *reg_base;
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
deleted file mode 100644
index d514b44e67dd..000000000000
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ /dev/null
@@ -1,288 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/clocksource/timer-oxnas-rps.c
- *
- * Copyright (C) 2009 Oxford Semiconductor Ltd
- * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/clockchips.h>
-#include <linux/sched_clock.h>
-
-/* TIMER1 used as tick
- * TIMER2 used as clocksource
- */
-
-/* Registers definitions */
-
-#define TIMER_LOAD_REG 0x0
-#define TIMER_CURR_REG 0x4
-#define TIMER_CTRL_REG 0x8
-#define TIMER_CLRINT_REG 0xC
-
-#define TIMER_BITS 24
-
-#define TIMER_MAX_VAL (BIT(TIMER_BITS) - 1)
-
-#define TIMER_PERIODIC BIT(6)
-#define TIMER_ENABLE BIT(7)
-
-#define TIMER_DIV1 (0)
-#define TIMER_DIV16 (1 << 2)
-#define TIMER_DIV256 (2 << 2)
-
-#define TIMER1_REG_OFFSET 0
-#define TIMER2_REG_OFFSET 0x20
-
-/* Clockevent & Clocksource data */
-
-struct oxnas_rps_timer {
- struct clock_event_device clkevent;
- void __iomem *clksrc_base;
- void __iomem *clkevt_base;
- unsigned long timer_period;
- unsigned int timer_prescaler;
- struct clk *clk;
- int irq;
-};
-
-static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id)
-{
- struct oxnas_rps_timer *rps = dev_id;
-
- writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
-
- rps->clkevent.event_handler(&rps->clkevent);
-
- return IRQ_HANDLED;
-}
-
-static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps,
- unsigned long period,
- unsigned int periodic)
-{
- uint32_t cfg = rps->timer_prescaler;
-
- if (period)
- cfg |= TIMER_ENABLE;
-
- if (periodic)
- cfg |= TIMER_PERIODIC;
-
- writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG);
- writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG);
-}
-
-static int oxnas_rps_timer_shutdown(struct clock_event_device *evt)
-{
- struct oxnas_rps_timer *rps =
- container_of(evt, struct oxnas_rps_timer, clkevent);
-
- oxnas_rps_timer_config(rps, 0, 0);
-
- return 0;
-}
-
-static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt)
-{
- struct oxnas_rps_timer *rps =
- container_of(evt, struct oxnas_rps_timer, clkevent);
-
- oxnas_rps_timer_config(rps, rps->timer_period, 1);
-
- return 0;
-}
-
-static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt)
-{
- struct oxnas_rps_timer *rps =
- container_of(evt, struct oxnas_rps_timer, clkevent);
-
- oxnas_rps_timer_config(rps, rps->timer_period, 0);
-
- return 0;
-}
-
-static int oxnas_rps_timer_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- struct oxnas_rps_timer *rps =
- container_of(evt, struct oxnas_rps_timer, clkevent);
-
- oxnas_rps_timer_config(rps, delta, 0);
-
- return 0;
-}
-
-static int __init oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps)
-{
- ulong clk_rate = clk_get_rate(rps->clk);
- ulong timer_rate;
-
- /* Start with prescaler 1 */
- rps->timer_prescaler = TIMER_DIV1;
- rps->timer_period = DIV_ROUND_UP(clk_rate, HZ);
- timer_rate = clk_rate;
-
- if (rps->timer_period > TIMER_MAX_VAL) {
- rps->timer_prescaler = TIMER_DIV16;
- timer_rate = clk_rate / 16;
- rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
- }
- if (rps->timer_period > TIMER_MAX_VAL) {
- rps->timer_prescaler = TIMER_DIV256;
- timer_rate = clk_rate / 256;
- rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
- }
-
- rps->clkevent.name = "oxnas-rps";
- rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_DYNIRQ;
- rps->clkevent.tick_resume = oxnas_rps_timer_shutdown;
- rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown;
- rps->clkevent.set_state_periodic = oxnas_rps_timer_set_periodic;
- rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot;
- rps->clkevent.set_next_event = oxnas_rps_timer_next_event;
- rps->clkevent.rating = 200;
- rps->clkevent.cpumask = cpu_possible_mask;
- rps->clkevent.irq = rps->irq;
- clockevents_config_and_register(&rps->clkevent,
- timer_rate,
- 1,
- TIMER_MAX_VAL);
-
- pr_info("Registered clock event rate %luHz prescaler %x period %lu\n",
- clk_rate,
- rps->timer_prescaler,
- rps->timer_period);
-
- return 0;
-}
-
-/* Clocksource */
-
-static void __iomem *timer_sched_base;
-
-static u64 notrace oxnas_rps_read_sched_clock(void)
-{
- return ~readl_relaxed(timer_sched_base);
-}
-
-static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps)
-{
- ulong clk_rate = clk_get_rate(rps->clk);
- int ret;
-
- /* use prescale 16 */
- clk_rate = clk_rate / 16;
-
- writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG);
- writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
- rps->clksrc_base + TIMER_CTRL_REG);
-
- timer_sched_base = rps->clksrc_base + TIMER_CURR_REG;
- sched_clock_register(oxnas_rps_read_sched_clock,
- TIMER_BITS, clk_rate);
- ret = clocksource_mmio_init(timer_sched_base,
- "oxnas_rps_clocksource_timer",
- clk_rate, 250, TIMER_BITS,
- clocksource_mmio_readl_down);
- if (WARN_ON(ret)) {
- pr_err("can't register clocksource\n");
- return ret;
- }
-
- pr_info("Registered clocksource rate %luHz\n", clk_rate);
-
- return 0;
-}
-
-static int __init oxnas_rps_timer_init(struct device_node *np)
-{
- struct oxnas_rps_timer *rps;
- void __iomem *base;
- int ret;
-
- rps = kzalloc(sizeof(*rps), GFP_KERNEL);
- if (!rps)
- return -ENOMEM;
-
- rps->clk = of_clk_get(np, 0);
- if (IS_ERR(rps->clk)) {
- ret = PTR_ERR(rps->clk);
- goto err_alloc;
- }
-
- ret = clk_prepare_enable(rps->clk);
- if (ret)
- goto err_clk;
-
- base = of_iomap(np, 0);
- if (!base) {
- ret = -ENXIO;
- goto err_clk_prepare;
- }
-
- rps->irq = irq_of_parse_and_map(np, 0);
- if (!rps->irq) {
- ret = -EINVAL;
- goto err_iomap;
- }
-
- rps->clkevt_base = base + TIMER1_REG_OFFSET;
- rps->clksrc_base = base + TIMER2_REG_OFFSET;
-
- /* Disable timers */
- writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG);
- writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG);
- writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG);
- writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG);
- writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
- writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG);
-
- ret = request_irq(rps->irq, oxnas_rps_timer_irq,
- IRQF_TIMER | IRQF_IRQPOLL,
- "rps-timer", rps);
- if (ret)
- goto err_iomap;
-
- ret = oxnas_rps_clocksource_init(rps);
- if (ret)
- goto err_irqreq;
-
- ret = oxnas_rps_clockevent_init(rps);
- if (ret)
- goto err_irqreq;
-
- return 0;
-
-err_irqreq:
- free_irq(rps->irq, rps);
-err_iomap:
- iounmap(base);
-err_clk_prepare:
- clk_disable_unprepare(rps->clk);
-err_clk:
- clk_put(rps->clk);
-err_alloc:
- kfree(rps);
-
- return ret;
-}
-
-TIMER_OF_DECLARE(ox810se_rps,
- "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
-TIMER_OF_DECLARE(ox820_rps,
- "oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 7d5fa9069906..69fee3540d37 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -16,9 +16,7 @@
#include <linux/irqreturn.h>
#include <linux/reset.h>
#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#define TIMER_IRQ_EN_REG 0x00
#define TIMER_IRQ_EN(val) BIT(val)
@@ -40,26 +38,16 @@ struct sun5i_timer {
struct clk *clk;
struct notifier_block clk_rate_cb;
u32 ticks_per_jiffy;
-};
-
-#define to_sun5i_timer(x) \
- container_of(x, struct sun5i_timer, clk_rate_cb)
-
-struct sun5i_timer_clksrc {
- struct sun5i_timer timer;
struct clocksource clksrc;
-};
-
-#define to_sun5i_timer_clksrc(x) \
- container_of(x, struct sun5i_timer_clksrc, clksrc)
-
-struct sun5i_timer_clkevt {
- struct sun5i_timer timer;
struct clock_event_device clkevt;
};
-#define to_sun5i_timer_clkevt(x) \
- container_of(x, struct sun5i_timer_clkevt, clkevt)
+#define nb_to_sun5i_timer(x) \
+ container_of(x, struct sun5i_timer, clk_rate_cb)
+#define clksrc_to_sun5i_timer(x) \
+ container_of(x, struct sun5i_timer, clksrc)
+#define clkevt_to_sun5i_timer(x) \
+ container_of(x, struct sun5i_timer, clkevt)
/*
* When we disable a timer, we need to wait at least for 2 cycles of
@@ -67,30 +55,30 @@ struct sun5i_timer_clkevt {
 * that is already set up and runs at the same frequency as the other
 * timers, and that will never be disabled.
*/
-static void sun5i_clkevt_sync(struct sun5i_timer_clkevt *ce)
+static void sun5i_clkevt_sync(struct sun5i_timer *ce)
{
- u32 old = readl(ce->timer.base + TIMER_CNTVAL_LO_REG(1));
+ u32 old = readl(ce->base + TIMER_CNTVAL_LO_REG(1));
- while ((old - readl(ce->timer.base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)
+ while ((old - readl(ce->base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)
cpu_relax();
}
-static void sun5i_clkevt_time_stop(struct sun5i_timer_clkevt *ce, u8 timer)
+static void sun5i_clkevt_time_stop(struct sun5i_timer *ce, u8 timer)
{
- u32 val = readl(ce->timer.base + TIMER_CTL_REG(timer));
- writel(val & ~TIMER_CTL_ENABLE, ce->timer.base + TIMER_CTL_REG(timer));
+ u32 val = readl(ce->base + TIMER_CTL_REG(timer));
+ writel(val & ~TIMER_CTL_ENABLE, ce->base + TIMER_CTL_REG(timer));
sun5i_clkevt_sync(ce);
}
-static void sun5i_clkevt_time_setup(struct sun5i_timer_clkevt *ce, u8 timer, u32 delay)
+static void sun5i_clkevt_time_setup(struct sun5i_timer *ce, u8 timer, u32 delay)
{
- writel(delay, ce->timer.base + TIMER_INTVAL_LO_REG(timer));
+ writel(delay, ce->base + TIMER_INTVAL_LO_REG(timer));
}
-static void sun5i_clkevt_time_start(struct sun5i_timer_clkevt *ce, u8 timer, bool periodic)
+static void sun5i_clkevt_time_start(struct sun5i_timer *ce, u8 timer, bool periodic)
{
- u32 val = readl(ce->timer.base + TIMER_CTL_REG(timer));
+ u32 val = readl(ce->base + TIMER_CTL_REG(timer));
if (periodic)
val &= ~TIMER_CTL_ONESHOT;
@@ -98,12 +86,12 @@ static void sun5i_clkevt_time_start(struct sun5i_timer_clkevt *ce, u8 timer, boo
val |= TIMER_CTL_ONESHOT;
writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
- ce->timer.base + TIMER_CTL_REG(timer));
+ ce->base + TIMER_CTL_REG(timer));
}
static int sun5i_clkevt_shutdown(struct clock_event_device *clkevt)
{
- struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+ struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
return 0;
@@ -111,7 +99,7 @@ static int sun5i_clkevt_shutdown(struct clock_event_device *clkevt)
static int sun5i_clkevt_set_oneshot(struct clock_event_device *clkevt)
{
- struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+ struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
sun5i_clkevt_time_start(ce, 0, false);
@@ -120,10 +108,10 @@ static int sun5i_clkevt_set_oneshot(struct clock_event_device *clkevt)
static int sun5i_clkevt_set_periodic(struct clock_event_device *clkevt)
{
- struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+ struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
- sun5i_clkevt_time_setup(ce, 0, ce->timer.ticks_per_jiffy);
+ sun5i_clkevt_time_setup(ce, 0, ce->ticks_per_jiffy);
sun5i_clkevt_time_start(ce, 0, true);
return 0;
}
@@ -131,7 +119,7 @@ static int sun5i_clkevt_set_periodic(struct clock_event_device *clkevt)
static int sun5i_clkevt_next_event(unsigned long evt,
struct clock_event_device *clkevt)
{
- struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+ struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
sun5i_clkevt_time_setup(ce, 0, evt - TIMER_SYNC_TICKS);
@@ -142,9 +130,9 @@ static int sun5i_clkevt_next_event(unsigned long evt,
static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
{
- struct sun5i_timer_clkevt *ce = dev_id;
+ struct sun5i_timer *ce = dev_id;
- writel(0x1, ce->timer.base + TIMER_IRQ_ST_REG);
+ writel(0x1, ce->base + TIMER_IRQ_ST_REG);
ce->clkevt.event_handler(&ce->clkevt);
return IRQ_HANDLED;
@@ -152,17 +140,16 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
static u64 sun5i_clksrc_read(struct clocksource *clksrc)
{
- struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+ struct sun5i_timer *cs = clksrc_to_sun5i_timer(clksrc);
- return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+ return ~readl(cs->base + TIMER_CNTVAL_LO_REG(1));
}
-static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
- unsigned long event, void *data)
+static int sun5i_rate_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
struct clk_notifier_data *ndata = data;
- struct sun5i_timer *timer = to_sun5i_timer(nb);
- struct sun5i_timer_clksrc *cs = container_of(timer, struct sun5i_timer_clksrc, timer);
+ struct sun5i_timer *cs = nb_to_sun5i_timer(nb);
switch (event) {
case PRE_RATE_CHANGE:
@@ -171,6 +158,8 @@ static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
case POST_RATE_CHANGE:
clocksource_register_hz(&cs->clksrc, ndata->new_rate);
+ clockevents_update_freq(&cs->clkevt, ndata->new_rate);
+ cs->ticks_per_jiffy = DIV_ROUND_UP(ndata->new_rate, HZ);
break;
default:
@@ -180,47 +169,18 @@ static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static int __init sun5i_setup_clocksource(struct device_node *node,
- void __iomem *base,
- struct clk *clk, int irq)
+static int sun5i_setup_clocksource(struct platform_device *pdev,
+ unsigned long rate)
{
- struct sun5i_timer_clksrc *cs;
- unsigned long rate;
+ struct sun5i_timer *cs = platform_get_drvdata(pdev);
+ void __iomem *base = cs->base;
int ret;
- cs = kzalloc(sizeof(*cs), GFP_KERNEL);
- if (!cs)
- return -ENOMEM;
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("Couldn't enable parent clock\n");
- goto err_free;
- }
-
- rate = clk_get_rate(clk);
- if (!rate) {
- pr_err("Couldn't get parent clock rate\n");
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- cs->timer.base = base;
- cs->timer.clk = clk;
- cs->timer.clk_rate_cb.notifier_call = sun5i_rate_cb_clksrc;
- cs->timer.clk_rate_cb.next = NULL;
-
- ret = clk_notifier_register(clk, &cs->timer.clk_rate_cb);
- if (ret) {
- pr_err("Unable to register clock notifier.\n");
- goto err_disable_clk;
- }
-
writel(~0, base + TIMER_INTVAL_LO_REG(1));
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(1));
- cs->clksrc.name = node->name;
+ cs->clksrc.name = pdev->dev.of_node->name;
cs->clksrc.rating = 340;
cs->clksrc.read = sun5i_clksrc_read;
cs->clksrc.mask = CLOCKSOURCE_MASK(32);
@@ -228,74 +188,23 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
- pr_err("Couldn't register clock source.\n");
- goto err_remove_notifier;
+ dev_err(&pdev->dev, "Couldn't register clock source.\n");
+ return ret;
}
return 0;
-
-err_remove_notifier:
- clk_notifier_unregister(clk, &cs->timer.clk_rate_cb);
-err_disable_clk:
- clk_disable_unprepare(clk);
-err_free:
- kfree(cs);
- return ret;
}
-static int sun5i_rate_cb_clkevt(struct notifier_block *nb,
- unsigned long event, void *data)
+static int sun5i_setup_clockevent(struct platform_device *pdev,
+ unsigned long rate, int irq)
{
- struct clk_notifier_data *ndata = data;
- struct sun5i_timer *timer = to_sun5i_timer(nb);
- struct sun5i_timer_clkevt *ce = container_of(timer, struct sun5i_timer_clkevt, timer);
-
- if (event == POST_RATE_CHANGE) {
- clockevents_update_freq(&ce->clkevt, ndata->new_rate);
- ce->timer.ticks_per_jiffy = DIV_ROUND_UP(ndata->new_rate, HZ);
- }
-
- return NOTIFY_DONE;
-}
-
-static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem *base,
- struct clk *clk, int irq)
-{
- struct sun5i_timer_clkevt *ce;
- unsigned long rate;
+ struct device *dev = &pdev->dev;
+ struct sun5i_timer *ce = platform_get_drvdata(pdev);
+ void __iomem *base = ce->base;
int ret;
u32 val;
- ce = kzalloc(sizeof(*ce), GFP_KERNEL);
- if (!ce)
- return -ENOMEM;
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("Couldn't enable parent clock\n");
- goto err_free;
- }
-
- rate = clk_get_rate(clk);
- if (!rate) {
- pr_err("Couldn't get parent clock rate\n");
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- ce->timer.base = base;
- ce->timer.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
- ce->timer.clk = clk;
- ce->timer.clk_rate_cb.notifier_call = sun5i_rate_cb_clkevt;
- ce->timer.clk_rate_cb.next = NULL;
-
- ret = clk_notifier_register(clk, &ce->timer.clk_rate_cb);
- if (ret) {
- pr_err("Unable to register clock notifier.\n");
- goto err_disable_clk;
- }
-
- ce->clkevt.name = node->name;
+ ce->clkevt.name = dev->of_node->name;
ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ce->clkevt.set_next_event = sun5i_clkevt_next_event;
ce->clkevt.set_state_shutdown = sun5i_clkevt_shutdown;
@@ -313,60 +222,109 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem
clockevents_config_and_register(&ce->clkevt, rate,
TIMER_SYNC_TICKS, 0xffffffff);
- ret = request_irq(irq, sun5i_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "sun5i_timer0", ce);
+ ret = devm_request_irq(dev, irq, sun5i_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL,
+ "sun5i_timer0", ce);
if (ret) {
- pr_err("Unable to register interrupt\n");
- goto err_remove_notifier;
+ dev_err(dev, "Unable to register interrupt\n");
+ return ret;
}
return 0;
-
-err_remove_notifier:
- clk_notifier_unregister(clk, &ce->timer.clk_rate_cb);
-err_disable_clk:
- clk_disable_unprepare(clk);
-err_free:
- kfree(ce);
- return ret;
}
-static int __init sun5i_timer_init(struct device_node *node)
+static int sun5i_timer_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct sun5i_timer *st;
struct reset_control *rstc;
void __iomem *timer_base;
struct clk *clk;
+ unsigned long rate;
int irq, ret;
- timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, st);
+
+ timer_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer_base)) {
- pr_err("Can't map registers\n");
+ dev_err(dev, "Can't map registers\n");
return PTR_ERR(timer_base);
}
- irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0) {
- pr_err("Can't parse IRQ\n");
- return -EINVAL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Can't get IRQ\n");
+ return irq;
}
- clk = of_clk_get(node, 0);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
- pr_err("Can't get timer clock\n");
+ dev_err(dev, "Can't get timer clock\n");
return PTR_ERR(clk);
}
- rstc = of_reset_control_get(node, NULL);
- if (!IS_ERR(rstc))
+ rate = clk_get_rate(clk);
+ if (!rate) {
+ dev_err(dev, "Couldn't get parent clock rate\n");
+ return -EINVAL;
+ }
+
+ st->base = timer_base;
+ st->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+ st->clk = clk;
+ st->clk_rate_cb.notifier_call = sun5i_rate_cb;
+ st->clk_rate_cb.next = NULL;
+
+ ret = devm_clk_notifier_register(dev, clk, &st->clk_rate_cb);
+ if (ret) {
+ dev_err(dev, "Unable to register clock notifier.\n");
+ return ret;
+ }
+
+ rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (rstc)
reset_control_deassert(rstc);
- ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
+ ret = sun5i_setup_clocksource(pdev, rate);
if (ret)
return ret;
- return sun5i_setup_clockevent(node, timer_base, clk, irq);
+ ret = sun5i_setup_clockevent(pdev, rate, irq);
+ if (ret)
+ goto err_unreg_clocksource;
+
+ return 0;
+
+err_unreg_clocksource:
+ clocksource_unregister(&st->clksrc);
+ return ret;
}
-TIMER_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
- sun5i_timer_init);
-TIMER_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
- sun5i_timer_init);
+
+static void sun5i_timer_remove(struct platform_device *pdev)
+{
+ struct sun5i_timer *st = platform_get_drvdata(pdev);
+
+ clocksource_unregister(&st->clksrc);
+}
+
+static const struct of_device_id sun5i_timer_of_match[] = {
+ { .compatible = "allwinner,sun5i-a13-hstimer" },
+ { .compatible = "allwinner,sun7i-a20-hstimer" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun5i_timer_of_match);
+
+static struct platform_driver sun5i_timer_driver = {
+ .probe = sun5i_timer_probe,
+ .remove_new = sun5i_timer_remove,
+ .driver = {
+ .name = "sun5i-timer",
+ .of_match_table = sun5i_timer_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(sun5i_timer_driver);
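
The timer-sun5i conversion above folds the clocksource- and clockevent-specific wrappers into a single struct and recovers it from either embedded member with one container_of() each. A standalone illustration of that pattern; the two framework structs are reduced to stubs here.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clocksource_stub		{ const char *name; };
struct clock_event_stub		{ const char *name; };

struct sun5i_timer_demo {
	int ticks_per_jiffy;
	struct clocksource_stub clksrc;
	struct clock_event_stub clkevt;
};

#define clksrc_to_timer(p) container_of(p, struct sun5i_timer_demo, clksrc)
#define clkevt_to_timer(p) container_of(p, struct sun5i_timer_demo, clkevt)

int main(void)
{
	struct sun5i_timer_demo t = { .ticks_per_jiffy = 1000 };
	struct clock_event_stub *ce = &t.clkevt;

	/* A callback handed only &t.clkevt can still reach the parent */
	printf("ticks_per_jiffy = %d\n", clkevt_to_timer(ce)->ticks_per_jiffy);
	return 0;
}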
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 7a8d402f05be..9af280735cba 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -67,7 +67,6 @@ config COMEDI_TEST
config COMEDI_PARPORT
tristate "Parallel port support"
- depends on HAS_IOPORT
help
Enable support for the standard parallel port.
A cheap and easy way to get a few more digital I/O lines. Steal
@@ -80,7 +79,6 @@ config COMEDI_PARPORT
config COMEDI_SSV_DNP
tristate "SSV Embedded Systems DIL/Net-PC support"
depends on X86_32 || COMPILE_TEST
- depends on HAS_IOPORT
help
Enable support for SSV Embedded Systems DIL/Net-PC
@@ -91,7 +89,6 @@ endif # COMEDI_MISC_DRIVERS
menuconfig COMEDI_ISA_DRIVERS
bool "Comedi ISA and PC/104 drivers"
- depends on ISA
help
Enable comedi ISA and PC/104 drivers to be built
@@ -103,8 +100,7 @@ if COMEDI_ISA_DRIVERS
config COMEDI_PCL711
tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112
@@ -165,9 +161,8 @@ config COMEDI_PCL730
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
@@ -178,9 +173,8 @@ config COMEDI_PCL812
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -189,9 +183,8 @@ config COMEDI_PCL816
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
@@ -210,7 +203,7 @@ config COMEDI_PCM3724
config COMEDI_AMPLC_DIO200_ISA
tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
- depends on COMEDI_AMPLC_DIO200
+ select COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and
PC272E ISA DIO boards
@@ -262,8 +255,7 @@ config COMEDI_DAC02
config COMEDI_DAS16M1
tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
@@ -273,7 +265,7 @@ config COMEDI_DAS16M1
config COMEDI_DAS08_ISA
tristate "DAS-08 compatible ISA and PC/104 card support"
- depends on COMEDI_DAS08
+ select COMEDI_DAS08
help
Enable support for Keithley Metrabyte/ComputerBoards DAS08
and compatible ISA and PC/104 cards:
@@ -286,9 +278,8 @@ config COMEDI_DAS08_ISA
config COMEDI_DAS16
tristate "DAS-16 compatible ISA and PC/104 card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Keithley Metrabyte/ComputerBoards DAS16
@@ -305,8 +296,7 @@ config COMEDI_DAS16
config COMEDI_DAS800
tristate "DAS800 and compatible ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
Keithley Metrabyte DAS-800, DAS-801, DAS-802
@@ -318,9 +308,8 @@ config COMEDI_DAS800
config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for DAS1800 and compatible ISA cards
Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
@@ -334,8 +323,7 @@ config COMEDI_DAS1800
config COMEDI_DAS6402
tristate "DAS6402 and compatible ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for DAS6402 and compatible ISA cards
Computerboards, Keithley Metrabyte DAS6402 and compatibles
@@ -414,8 +402,7 @@ config COMEDI_FL512
config COMEDI_AIO_AIO12_8
tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
@@ -469,9 +456,8 @@ config COMEDI_ADQ12B
config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for National Instruments AT-A2150 cards
@@ -480,8 +466,7 @@ config COMEDI_NI_AT_A2150
config COMEDI_NI_AT_AO
tristate "NI AT-AO-6/10 EISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for National Instruments AT-AO-6/10 cards
@@ -512,7 +497,7 @@ config COMEDI_NI_ATMIO16D
config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
- depends on COMEDI_NI_LABPC
+ select COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
@@ -576,7 +561,7 @@ endif # COMEDI_ISA_DRIVERS
menuconfig COMEDI_PCI_DRIVERS
tristate "Comedi PCI drivers"
- depends on PCI && HAS_IOPORT
+ depends on PCI
help
Enable support for comedi PCI drivers.
@@ -725,8 +710,7 @@ config COMEDI_ADL_PCI8164
config COMEDI_ADL_PCI9111
tristate "ADLink PCI-9111HR support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for ADlink PCI9111 cards
@@ -736,7 +720,7 @@ config COMEDI_ADL_PCI9111
config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
depends on HAS_DMA
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@@ -745,8 +729,7 @@ config COMEDI_ADL_PCI9118
config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x and PCI-1731 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
PCI-1713 and PCI-1731
@@ -790,8 +773,7 @@ config COMEDI_ADV_PCI1760
config COMEDI_ADV_PCI_DIO
tristate "Advantech PCI DIO card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Advantech PCI DIO cards
@@ -804,7 +786,7 @@ config COMEDI_ADV_PCI_DIO
config COMEDI_AMPLC_DIO200_PCI
tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support"
- depends on COMEDI_AMPLC_DIO200
+ select COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236
and PCIe296 DIO boards.
@@ -832,8 +814,7 @@ config COMEDI_AMPLC_PC263_PCI
config COMEDI_AMPLC_PCI224
tristate "Amplicon PCI224 and PCI234 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Amplicon PCI224 and PCI234 AO boards
@@ -842,8 +823,7 @@ config COMEDI_AMPLC_PCI224
config COMEDI_AMPLC_PCI230
tristate "Amplicon PCI230 and PCI260 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
@@ -862,7 +842,7 @@ config COMEDI_CONTEC_PCI_DIO
config COMEDI_DAS08_PCI
tristate "DAS-08 PCI support"
- depends on COMEDI_DAS08
+ select COMEDI_DAS08
help
Enable support for PCI DAS-08 cards.
@@ -949,8 +929,7 @@ config COMEDI_CB_PCIDAS64
config COMEDI_CB_PCIDAS
tristate "MeasurementComputing PCI-DAS support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
@@ -974,8 +953,7 @@ config COMEDI_CB_PCIDDA
config COMEDI_CB_PCIMDAS
tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI Migration
@@ -995,8 +973,7 @@ config COMEDI_CB_PCIMDDA
config COMEDI_ME4000
tristate "Meilhaus ME-4000 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Meilhaus PCI data acquisition cards
ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
@@ -1054,7 +1031,7 @@ config COMEDI_NI_670X
config COMEDI_NI_LABPC_PCI
tristate "NI Lab-PC PCI-1200 support"
- depends on COMEDI_NI_LABPC
+ select COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC PCI-1200.
@@ -1076,7 +1053,6 @@ config COMEDI_NI_PCIDIO
config COMEDI_NI_PCIMIO
tristate "NI PCI-MIO-E series and M series support"
depends on HAS_DMA
- depends on HAS_IOPORT
select COMEDI_NI_TIOCMD
select COMEDI_8255
help
@@ -1098,8 +1074,7 @@ config COMEDI_NI_PCIMIO
config COMEDI_RTD520
tristate "Real Time Devices PCI4520/DM7520 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Real Time Devices PCI4520/DM7520
@@ -1139,8 +1114,7 @@ if COMEDI_PCMCIA_DRIVERS
config COMEDI_CB_DAS16_CS
tristate "CB DAS16 series PCMCIA support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for the ComputerBoards/MeasurementComputing PCMCIA
cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
@@ -1150,7 +1124,7 @@ config COMEDI_CB_DAS16_CS
config COMEDI_DAS08_CS
tristate "CB DAS08 PCMCIA support"
- depends on COMEDI_DAS08
+ select COMEDI_DAS08
help
Enable support for the ComputerBoards/MeasurementComputing DAS-08
PCMCIA card
@@ -1160,7 +1134,6 @@ config COMEDI_DAS08_CS
config COMEDI_NI_DAQ_700_CS
tristate "NI DAQCard-700 PCMCIA support"
- depends on HAS_IOPORT
help
Enable support for the National Instruments PCMCIA DAQCard-700 DIO
@@ -1169,7 +1142,6 @@ config COMEDI_NI_DAQ_700_CS
config COMEDI_NI_DAQ_DIO24_CS
tristate "NI DAQ-Card DIO-24 PCMCIA support"
- depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
@@ -1179,7 +1151,7 @@ config COMEDI_NI_DAQ_DIO24_CS
config COMEDI_NI_LABPC_CS
tristate "NI DAQCard-1200 PCMCIA support"
- depends on COMEDI_NI_LABPC
+ select COMEDI_NI_LABPC
help
Enable support for the National Instruments PCMCIA DAQCard-1200
@@ -1188,7 +1160,6 @@ config COMEDI_NI_LABPC_CS
config COMEDI_NI_MIO_CS
tristate "NI DAQCard E series PCMCIA support"
- depends on HAS_IOPORT
select COMEDI_NI_TIO
select COMEDI_8255
help
@@ -1201,7 +1172,6 @@ config COMEDI_NI_MIO_CS
config COMEDI_QUATECH_DAQP_CS
tristate "Quatech DAQP PCMCIA data capture card support"
- depends on HAS_IOPORT
help
Enable support for the Quatech DAQP PCMCIA data capture cards
DAQP-208 and DAQP-308
@@ -1278,14 +1248,12 @@ endif # COMEDI_USB_DRIVERS
config COMEDI_8254
tristate
- depends on HAS_IOPORT
config COMEDI_8255
tristate
config COMEDI_8255_SA
tristate "Standalone 8255 support"
- depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for 8255 digital I/O as a standalone driver.
@@ -1317,7 +1285,7 @@ config COMEDI_KCOMEDILIB
called kcomedilib.
config COMEDI_AMPLC_DIO200
- depends on COMEDI_8254
+ select COMEDI_8254
tristate
config COMEDI_AMPLC_PC236
@@ -1326,7 +1294,7 @@ config COMEDI_AMPLC_PC236
config COMEDI_DAS08
tristate
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
config COMEDI_ISADMA
@@ -1334,8 +1302,7 @@ config COMEDI_ISADMA
config COMEDI_NI_LABPC
tristate
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
config COMEDI_NI_LABPC_ISADMA
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 62962ae84b77..497bc05dca4d 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -92,7 +92,7 @@ config MICROCHIP_TCB_CAPTURE
config RZ_MTU3_CNT
tristate "Renesas RZ/G2L MTU3a counter driver"
- depends on RZ_MTU3 || COMPILE_TEST
+ depends on RZ_MTU3
help
Enable support for MTU3a counter driver found on Renesas RZ/G2L alike
SoCs. This IP supports both 16-bit and 32-bit phase counting mode
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a757f90aa9d6..60ed89000e82 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -86,6 +86,7 @@ static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
+static bool cpufreq_boost_supported(void);
/*
* Two notifier lists: the "policy" list is involved in the
@@ -455,8 +456,10 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
policy->cur,
policy->cpuinfo.max_freq);
+ spin_lock(&policy->transition_lock);
policy->transition_ongoing = false;
policy->transition_task = NULL;
+ spin_unlock(&policy->transition_lock);
wake_up(&policy->transition_wait);
}
@@ -621,6 +624,40 @@ static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
}
define_one_global_rw(boost);
+static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", policy->boost_enabled);
+}
+
+static ssize_t store_local_boost(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ int ret, enable;
+
+ ret = kstrtoint(buf, 10, &enable);
+ if (ret || enable < 0 || enable > 1)
+ return -EINVAL;
+
+ if (!cpufreq_driver->boost_enabled)
+ return -EINVAL;
+
+ if (policy->boost_enabled == enable)
+ return count;
+
+ cpus_read_lock();
+ ret = cpufreq_driver->set_boost(policy, enable);
+ cpus_read_unlock();
+
+ if (ret)
+ return ret;
+
+ policy->boost_enabled = enable;
+
+ return count;
+}
+
+static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
+
static struct cpufreq_governor *find_governor(const char *str_governor)
{
struct cpufreq_governor *t;
@@ -1055,6 +1092,12 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
+ if (cpufreq_boost_supported()) {
+ ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1943,16 +1986,16 @@ void cpufreq_resume(void)
for_each_active_policy(policy) {
if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
- pr_err("%s: Failed to resume driver: %p\n", __func__,
- policy);
+ pr_err("%s: Failed to resume driver: %s\n", __func__,
+ cpufreq_driver->name);
} else if (has_target()) {
down_write(&policy->rwsem);
ret = cpufreq_start_governor(policy);
up_write(&policy->rwsem);
if (ret)
- pr_err("%s: Failed to start governor for policy: %p\n",
- __func__, policy);
+ pr_err("%s: Failed to start governor for CPU%u's policy\n",
+ __func__, policy->cpu);
}
}
}
@@ -2716,6 +2759,8 @@ int cpufreq_boost_trigger_state(int state)
ret = cpufreq_driver->set_boost(policy, state);
if (ret)
goto err_reset_state;
+
+ policy->boost_enabled = state;
}
cpus_read_unlock();
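
The new show_local_boost()/store_local_boost() pair above exposes boost control per policy. A userspace sketch of toggling it; the attribute name ("boost", mode 0644) comes from the patch, while the policy0 path is the usual cpufreq sysfs layout and is an assumption here.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpufreq/policy0/boost";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);	/* attribute only exists if the driver supports boost */
		return 1;
	}
	fputs("1\n", f);	/* store_local_boost() accepts only 0 or 1 */
	return fclose(f) ? 1 : 0;
}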
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 85da677c43d6..af44ee6a6430 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -439,7 +439,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
ret = gov->init(dbs_data);
if (ret)
- goto free_policy_dbs_info;
+ goto free_dbs_data;
/*
* The sampling interval should not be less than the transition latency
@@ -474,6 +474,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
gov->exit(dbs_data);
+
+free_dbs_data:
kfree(dbs_data);
free_policy_dbs_info:
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 84fe37def0f1..6f8b5ea7aeae 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -232,8 +232,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
status = ioread16(&pcch_hdr->status);
iowrite16(0, &pcch_hdr->status);
- cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
spin_unlock(&pcc_lock);
+ cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
if (status != CMD_COMPLETE) {
pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index d1c559879dcc..40d055560e52 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -14,7 +14,7 @@
struct cxl_cxims_data {
int nr_maps;
- u64 xormaps[];
+ u64 xormaps[] __counted_by(nr_maps);
};
/*
@@ -112,9 +112,9 @@ static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
GFP_KERNEL);
if (!cximsd)
return -ENOMEM;
+ cximsd->nr_maps = nr_maps;
memcpy(cximsd->xormaps, cxims->xormap_list,
nr_maps * sizeof(*cximsd->xormaps));
- cximsd->nr_maps = nr_maps;
cxlrd->platform_data = cximsd;
return 0;
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index ca60bb8114f2..4df4f614f490 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -715,24 +715,25 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
for (i = 0; i < cel_entries; i++) {
u16 opcode = le16_to_cpu(cel_entry[i].opcode);
struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
+ int enabled = 0;
- if (!cmd && (!cxl_is_poison_command(opcode) ||
- !cxl_is_security_command(opcode))) {
- dev_dbg(dev,
- "Opcode 0x%04x unsupported by driver\n", opcode);
- continue;
- }
-
- if (cmd)
+ if (cmd) {
set_bit(cmd->info.id, mds->enabled_cmds);
+ enabled++;
+ }
- if (cxl_is_poison_command(opcode))
+ if (cxl_is_poison_command(opcode)) {
cxl_set_poison_cmd_enabled(&mds->poison, opcode);
+ enabled++;
+ }
- if (cxl_is_security_command(opcode))
+ if (cxl_is_security_command(opcode)) {
cxl_set_security_cmd_enabled(&mds->security, opcode);
+ enabled++;
+ }
- dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode);
+ dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
+ enabled ? "enabled" : "unsupported by driver");
}
}
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 724be8448eb4..7ca01a834e18 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
@@ -706,16 +707,20 @@ static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
return cxl_setup_regs(map);
}
-static inline int cxl_port_setup_regs(struct cxl_port *port,
- resource_size_t component_reg_phys)
+static int cxl_port_setup_regs(struct cxl_port *port,
+ resource_size_t component_reg_phys)
{
+ if (dev_is_platform(port->uport_dev))
+ return 0;
return cxl_setup_comp_regs(&port->dev, &port->comp_map,
component_reg_phys);
}
-static inline int cxl_dport_setup_regs(struct cxl_dport *dport,
- resource_size_t component_reg_phys)
+static int cxl_dport_setup_regs(struct cxl_dport *dport,
+ resource_size_t component_reg_phys)
{
+ if (dev_is_platform(dport->dport_dev))
+ return 0;
return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
component_reg_phys);
}
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e115ba382e04..6d63b8798c29 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -717,13 +717,35 @@ static int match_free_decoder(struct device *dev, void *data)
return 0;
}
+static int match_auto_decoder(struct device *dev, void *data)
+{
+ struct cxl_region_params *p = data;
+ struct cxl_decoder *cxld;
+ struct range *r;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+ r = &cxld->hpa_range;
+
+ if (p->res && p->res->start == r->start && p->res->end == r->end)
+ return 1;
+
+ return 0;
+}
+
static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
struct cxl_region *cxlr)
{
struct device *dev;
int id = 0;
- dev = device_find_child(&port->dev, &id, match_free_decoder);
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
+ dev = device_find_child(&port->dev, &cxlr->params,
+ match_auto_decoder);
+ else
+ dev = device_find_child(&port->dev, &id, match_free_decoder);
if (!dev)
return NULL;
/*
@@ -1154,16 +1176,15 @@ static int cxl_port_setup_targets(struct cxl_port *port,
}
/*
- * If @parent_port is masking address bits, pick the next unused address
- * bit to route @port's targets.
+ * Interleave granularity is a multiple of @parent_port granularity.
+ * Multiplier is the parent port interleave ways.
*/
- if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
- u32 address_bit = max(peig + peiw, eiw + peig);
-
- eig = address_bit - eiw + 1;
- } else {
- eiw = peiw;
- eig = peig;
+ rc = granularity_to_eig(parent_ig * parent_iw, &eig);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: invalid granularity calculation (%d * %d)\n",
+ dev_name(&parent_port->dev), parent_ig, parent_iw);
+ return rc;
}
rc = eig_to_granularity(eig, &ig);
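
The region hunk above replaces the address-bit heuristic with a direct calculation: the port's interleave granularity is the parent granularity times the parent interleave ways, converted back to the encoded eig. A standalone worked example; the encoding granularity = 256 << eig mirrors how the granularity_to_eig()/eig_to_granularity() helpers are understood here and should be treated as an assumption.

#include <stdio.h>

static int granularity_to_eig_demo(unsigned int granularity, unsigned int *eig)
{
	unsigned int e;

	for (e = 0; e <= 6; e++) {
		if ((256u << e) == granularity) {
			*eig = e;
			return 0;
		}
	}
	return -1;	/* not a supported power-of-two multiple of 256 */
}

int main(void)
{
	unsigned int parent_ig = 256, parent_iw = 2, eig;

	if (granularity_to_eig_demo(parent_ig * parent_iw, &eig))
		return 1;
	printf("ig = %u -> eig = %u\n", parent_ig * parent_iw, eig);	/* 512 -> 1 */
	return 0;
}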
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 1cb1494c28fe..44a21ab7add5 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -529,7 +529,6 @@ static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
static int cxl_pci_ras_unmask(struct pci_dev *pdev)
{
- struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
void __iomem *addr;
u32 orig_val, val, mask;
@@ -541,9 +540,9 @@ static int cxl_pci_ras_unmask(struct pci_dev *pdev)
return 0;
}
- /* BIOS has CXL error control */
- if (!host_bridge->native_cxl_error)
- return -ENXIO;
+ /* BIOS has PCIe AER error control */
+ if (!pcie_aer_is_native(pdev))
+ return 0;
rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
if (rc)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 08fdd0e2ed1b..4ccae1a3b884 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -474,25 +474,6 @@ config MXS_DMA
Support the MXS DMA engine. This engine including APBH-DMA
and APBX-DMA is integrated into some Freescale chips.
-config MX3_IPU
- bool "MX3x Image Processing Unit support"
- depends on ARCH_MXC
- select DMA_ENGINE
- default y
- help
- If you plan to use the Image Processing unit in the i.MX3x, say
- Y here. If unsure, select Y.
-
-config MX3_IPU_IRQS
- int "Number of dynamically mapped interrupts for IPU"
- depends on MX3_IPU
- range 2 137
- default 4
- help
- Out of 137 interrupt sources on i.MX31 IPU only very few are used.
- To avoid bloating the irq_desc[] array we allocate a sufficient
- number of IRQ slots and map them dynamically to specific sources.
-
config NBPFAXI_DMA
tristate "Renesas Type-AXI NBPF DMA support"
select DMA_ENGINE
@@ -699,7 +680,7 @@ config XGENE_DMA
config XILINX_DMA
tristate "Xilinx AXI DMAS Engine"
- depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for Xilinx AXI VDMA Soft IP.
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a4fd1ce29510..83553a97a010 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -32,8 +32,10 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_DW_EDMA) += dw-edma/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
-obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
+fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o
+obj-$(CONFIG_MCF_EDMA) += mcf-edma.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HISI_DMA) += hisi_dma.o
@@ -55,7 +57,6 @@ obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
-obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
obj-$(CONFIG_OWL_DMA) += owl-dma.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 4cf8da77bdd9..3af795635c5c 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -10,8 +10,9 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ee3a219e3a89..b2876f67471f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/overflow.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 064761289a73..94ea35330eb5 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -35,7 +35,9 @@
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index eabbcfcaa7cb..80096f94032d 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -14,9 +14,8 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 9c1a6e9a9c03..adbd47bd6adf 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 826b98284fa1..b7388ae62d7f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1147,69 +1147,27 @@ int dma_async_device_register(struct dma_device *device)
device->owner = device->dev->driver->owner;
- if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMCPY");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_XOR");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_XOR_VAL");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_PQ");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_PQ_VAL");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMSET");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_INTERRUPT");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_CYCLIC");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_INTERLEAVE");
- return -EIO;
- }
+#define CHECK_CAP(_name, _type) \
+{ \
+ if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \
+ dev_err(device->dev, \
+ "Device claims capability %s, but op is not defined\n", \
+ __stringify(_type)); \
+ return -EIO; \
+ } \
+}
+ CHECK_CAP(dma_memcpy, DMA_MEMCPY);
+ CHECK_CAP(dma_xor, DMA_XOR);
+ CHECK_CAP(dma_xor_val, DMA_XOR_VAL);
+ CHECK_CAP(dma_pq, DMA_PQ);
+ CHECK_CAP(dma_pq_val, DMA_PQ_VAL);
+ CHECK_CAP(dma_memset, DMA_MEMSET);
+ CHECK_CAP(dma_interrupt, DMA_INTERRUPT);
+ CHECK_CAP(dma_cyclic, DMA_CYCLIC);
+ CHECK_CAP(interleaved_dma, DMA_INTERLEAVE);
+
+#undef CHECK_CAP
if (!device->device_tx_status) {
dev_err(device->dev, "Device tx_status is not defined\n");
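
For readers unfamiliar with the CHECK_CAP construct introduced above, the pattern is a function-local macro that token-pastes the callback name and stringifies the capability, so each capability/callback pair takes one line instead of a five-line if-block. A minimal userspace sketch of the same pattern follows; the struct, enum values and has_cap() helper are stand-ins, not the dmaengine API.

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

/* stand-in for struct dma_device: only two capabilities for brevity */
struct fake_dma_device {
	unsigned long cap_mask;
	void (*device_prep_dma_memcpy)(void);
	void (*device_prep_dma_xor)(void);
};

enum { DMA_MEMCPY = 0x1, DMA_XOR = 0x2 };

static int has_cap(unsigned long cap, unsigned long mask)
{
	return !!(mask & cap);
}

static int check_caps(struct fake_dma_device *device)
{
#define CHECK_CAP(_name, _type)						\
	if (has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \
		fprintf(stderr, "claims %s but op is not defined\n",	\
			__stringify(_type));				\
		return -1;						\
	}
	CHECK_CAP(dma_memcpy, DMA_MEMCPY);
	CHECK_CAP(dma_xor, DMA_XOR);
#undef CHECK_CAP
	return 0;
}

int main(void)
{
	struct fake_dma_device dev = { .cap_mask = DMA_MEMCPY | DMA_XOR };

	/* fails: both capabilities claimed but no callbacks are set */
	return check_caps(&dev) ? 1 : 0;
}
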
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 796b6caf0bab..dd02f84e404d 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index f9912c3dd4d7..4fb8508419db 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -5,8 +5,10 @@
* Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
#include <linux/bitops.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/types.h>
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 5338a94f1a69..5c4a448a1254 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1320,11 +1320,9 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct ep93xx_dma_engine *edma;
struct dma_device *dma_dev;
- size_t edma_size;
int ret, i;
- edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
- edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
+ edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL);
if (!edma)
return -ENOMEM;
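
The struct_size() conversion above replaces the open-coded "sizeof(*edma) + n * sizeof(chan)" with a helper that also saturates on multiplication overflow. The arithmetic it performs is sketched below in plain C for a trailing flexible array member; the overflow saturation of the real kernel macro is intentionally left out of this stand-in.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct chan { int id; };

struct engine {
	int nr;
	struct chan channels[];		/* flexible array member */
};

int main(void)
{
	size_t n = 4;
	size_t bytes = offsetof(struct engine, channels) + n * sizeof(struct chan);
	struct engine *e = calloc(1, bytes);

	if (!e)
		return 1;
	e->nr = (int)n;
	printf("allocated %zu bytes for %zu trailing channels\n", bytes, n);
	free(e);
	return 0;
}
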
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index a06a1575a2a5..a0f5741abcc4 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -7,6 +7,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include "fsl-edma-common.h"
@@ -40,14 +42,73 @@
#define EDMA64_ERRH 0x28
#define EDMA64_ERRL 0x2c
-#define EDMA_TCD 0x1000
+void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
+{
+ spin_lock(&fsl_chan->vchan.lock);
+
+ if (!fsl_chan->edesc) {
+ /* terminate_all called before */
+ spin_unlock(&fsl_chan->vchan.lock);
+ return;
+ }
+
+ if (!fsl_chan->edesc->iscyclic) {
+ list_del(&fsl_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+ fsl_chan->edesc = NULL;
+ fsl_chan->status = DMA_COMPLETE;
+ fsl_chan->idle = true;
+ } else {
+ vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+ }
+
+ if (!fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock(&fsl_chan->vchan.lock);
+}
+
+static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+ u32 val, flags;
+
+ flags = fsl_edma_drvflags(fsl_chan);
+ val = edma_readl_chreg(fsl_chan, ch_sbr);
+ /* Remote/local swapped wrongly on iMX8 QM Audio edma */
+ if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
+ if (!fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
+ } else {
+ if (fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
+ }
+
+ if (fsl_chan->is_remote)
+ val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
+
+ edma_writel_chreg(fsl_chan, val, ch_sbr);
+
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+ edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+
+ val = edma_readl_chreg(fsl_chan, ch_csr);
+ val |= EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+}
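
The ch_sbr handling above boils down to a small decision table: an RX channel normally gets the read attribute and a TX channel the write attribute, the i.MX8QM audio eDMA quirk swaps the two, and a remote channel clears both. A standalone C sketch of just that selection follows; it starts from zero rather than the current register value, and the bit positions simply mirror EDMA_V3_CH_SBR_*.

#include <stdio.h>

#define CH_SBR_WR	(1u << 21)
#define CH_SBR_RD	(1u << 22)

static unsigned int sbr_bits(int is_rxchan, int is_remote, int quirk_swapped)
{
	unsigned int val = 0;
	int rd = quirk_swapped ? !is_rxchan : is_rxchan;

	val |= rd ? CH_SBR_RD : CH_SBR_WR;
	if (is_remote)
		val &= ~(CH_SBR_RD | CH_SBR_WR);
	return val;
}

int main(void)
{
	printf("rx, normal  : %#x\n", sbr_bits(1, 0, 0));	/* RD */
	printf("tx, swapped : %#x\n", sbr_bits(0, 0, 1));	/* RD */
	printf("rx, remote  : %#x\n", sbr_bits(1, 1, 0));	/* neither */
	return 0;
}
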
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- if (fsl_chan->edma->drvdata->version == v1) {
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+ return fsl_edma3_enable_request(fsl_chan);
+
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
edma_writeb(fsl_chan->edma, ch, regs->serq);
} else {
@@ -59,12 +120,29 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
}
}
+static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+ u32 val = edma_readl_chreg(fsl_chan, ch_csr);
+ u32 flags;
+
+ flags = fsl_edma_drvflags(fsl_chan);
+
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+ edma_writel_chreg(fsl_chan, 0, ch_mux);
+
+ val &= ~EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+}
+
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- if (fsl_chan->edma->drvdata->version == v1) {
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+ return fsl_edma3_disable_request(fsl_chan);
+
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
edma_writeb(fsl_chan->edma, ch, regs->cerq);
edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
} else {
@@ -75,7 +153,6 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
}
}
-EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
u32 off, u32 slot, bool enable)
@@ -112,36 +189,33 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
int endian_diff[4] = {3, 1, -1, -3};
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
+ if (!dmamux_nr)
+ return;
+
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
- if (fsl_chan->edma->drvdata->mux_swap)
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
ch_off += endian_diff[ch_off % 4];
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
- if (fsl_chan->edma->drvdata->version == v3)
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
else
mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
-EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
- switch (addr_width) {
- case 1:
- return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
- case 2:
- return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
- case 4:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- case 8:
- return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
- default:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- }
+ u32 val;
+
+ if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ val = ffs(addr_width) - 1;
+ return val | (val << 8);
}
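
The rewritten fsl_edma_get_tcd_attr() relies on the bus width being a power of two: ffs(width) - 1 is log2(width), which is exactly the SSIZE/DSIZE code, placed in bits 2:0 and 10:8. The short userspace check below walks the supported widths; it uses the C library ffs() from <strings.h>, which has the same 1-based semantics as the kernel's.

#include <stdio.h>
#include <strings.h>

static unsigned int tcd_attr(unsigned int addr_width)
{
	unsigned int val;

	if (!addr_width)		/* undefined: default to 4 bytes */
		addr_width = 4;
	val = (unsigned int)ffs((int)addr_width) - 1;
	return val | (val << 8);	/* DSIZE in bits 2:0, SSIZE in bits 10:8 */
}

int main(void)
{
	unsigned int widths[] = { 1, 2, 4, 8, 16, 32 };

	for (unsigned int i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("width %2u bytes -> attr 0x%03x\n",
		       widths[i], tcd_attr(widths[i]));
	return 0;
}

For a 32-byte width this yields 0x505, the same value as the removed EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE pair used by fsl_edma_prep_memcpy() before this change.
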
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
@@ -155,7 +229,6 @@ void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
}
-EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
int fsl_edma_terminate_all(struct dma_chan *chan)
{
@@ -170,9 +243,12 @@ int fsl_edma_terminate_all(struct dma_chan *chan)
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
+ pm_runtime_allow(fsl_chan->pd_dev);
+
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
int fsl_edma_pause(struct dma_chan *chan)
{
@@ -188,7 +264,6 @@ int fsl_edma_pause(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_pause);
int fsl_edma_resume(struct dma_chan *chan)
{
@@ -204,7 +279,6 @@ int fsl_edma_resume(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_resume);
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
@@ -265,36 +339,41 @@ int fsl_edma_slave_config(struct dma_chan *chan,
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
struct virt_dma_desc *vdesc, bool in_progress)
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
- struct edma_regs *regs = &fsl_chan->edma->regs;
- u32 ch = fsl_chan->vchan.chan.chan_id;
enum dma_transfer_direction dir = edesc->dirn;
dma_addr_t cur_addr, dma_addr;
size_t len, size;
+ u32 nbytes = 0;
int i;
/* calculate the total size in this desc */
- for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
- len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+ nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+ nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+ len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ }
if (!in_progress)
return len;
if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, saddr);
else
- cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, daddr);
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+ nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+
+ size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+
if (dir == DMA_MEM_TO_DEV)
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
else
@@ -340,14 +419,10 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
return fsl_chan->status;
}
-EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_hw_tcd *tcd)
{
- struct fsl_edma_engine *edma = fsl_chan->edma;
- struct edma_regs *regs = &fsl_chan->edma->regs;
- u32 ch = fsl_chan->vchan.chan.chan_id;
u16 csr = 0;
/*
@@ -356,23 +431,22 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
* big- or little-endian obeying the eDMA engine model endian,
* and this is performed from specific edma_write functions
*/
- edma_writew(edma, 0, &regs->tcd[ch].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
- edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
- edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
+ edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
+ edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
- edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
- edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
+ edma_write_tcdreg(fsl_chan, tcd->attr, attr);
+ edma_write_tcdreg(fsl_chan, tcd->soff, soff);
- edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
- edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
+ edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
+ edma_write_tcdreg(fsl_chan, tcd->slast, slast);
- edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
- edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
- edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
+ edma_write_tcdreg(fsl_chan, tcd->citer, citer);
+ edma_write_tcdreg(fsl_chan, tcd->biter, biter);
+ edma_write_tcdreg(fsl_chan, tcd->doff, doff);
- edma_writel(edma, (s32)tcd->dlast_sga,
- &regs->tcd[ch].dlast_sga);
+ edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
if (fsl_chan->is_sw) {
csr = le16_to_cpu(tcd->csr);
@@ -380,16 +454,19 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
tcd->csr = cpu_to_le16(csr);
}
- edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
+ edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}
static inline
-void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
u16 biter, u16 doff, u32 dlast_sga, bool major_int,
bool disable_req, bool enable_sg)
{
+ struct dma_slave_config *cfg = &fsl_chan->cfg;
u16 csr = 0;
+ u32 burst;
/*
* eDMA hardware SGs require the TCDs to be stored in little
@@ -404,6 +481,21 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
tcd->soff = cpu_to_le16(soff);
+ if (fsl_chan->is_multi_fifo) {
+ /* set mloff to support multiple fifo */
+ burst = cfg->direction == DMA_DEV_TO_MEM ?
+ cfg->src_addr_width : cfg->dst_addr_width;
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
+ /* enable DMLOE/SMLOE */
+ if (cfg->direction == DMA_MEM_TO_DEV) {
+ nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
+ nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
+ } else {
+ nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
+ nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
+ }
+ }
+
tcd->nbytes = cpu_to_le32(nbytes);
tcd->slast = cpu_to_le32(slast);
@@ -422,6 +514,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
if (enable_sg)
csr |= EDMA_TCD_CSR_E_SG;
+ if (fsl_chan->is_rxchan)
+ csr |= EDMA_TCD_CSR_ACTIVE;
+
+ if (fsl_chan->is_sw)
+ csr |= EDMA_TCD_CSR_START;
+
tcd->csr = cpu_to_le16(csr);
}
@@ -461,6 +559,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
dma_addr_t dma_buf_next;
+ bool major_int = true;
int sg_len, i;
u32 src_addr, dst_addr, last_sg, nbytes;
u16 soff, doff, iter;
@@ -504,23 +603,28 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
src_addr = dma_buf_next;
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
- doff = 0;
- } else {
+ doff = fsl_chan->is_multi_fifo ? 4 : 0;
+ } else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
- soff = 0;
+ soff = fsl_chan->is_multi_fifo ? 4 : 0;
doff = fsl_chan->cfg.src_addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = doff = 0;
+ major_int = false;
}
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
fsl_chan->attr, soff, nbytes, 0, iter,
- iter, doff, last_sg, true, false, true);
+ iter, doff, last_sg, major_int, false, true);
dma_buf_next += period_len;
}
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
@@ -564,23 +668,51 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
- } else {
+ } else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = sg_dma_address(sg);
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = 0;
+ doff = 0;
}
+ /*
+		 * Choose a suitable burst length if sg_dma_len is not a
+		 * multiple of the burst length, so that the whole transfer
+		 * length is a multiple of the minor loop (burst length).
+ */
+ if (sg_dma_len(sg) % nbytes) {
+ u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
+ u32 burst = (direction == DMA_DEV_TO_MEM) ?
+ fsl_chan->cfg.src_maxburst :
+ fsl_chan->cfg.dst_maxburst;
+ int j;
+
+ for (j = burst; j > 1; j--) {
+ if (!(sg_dma_len(sg) % (j * width))) {
+ nbytes = j * width;
+ break;
+ }
+ }
+ /* Set burst size as 1 if there's no suitable one */
+ if (j == 1)
+ nbytes = width;
+ }
iter = sg_dma_len(sg) / nbytes;
if (i < sg_len - 1) {
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
false, false, true);
} else {
last_sg = 0;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
true, true, false);
@@ -589,7 +721,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
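
The burst fallback added to fsl_edma_prep_slave_sg() deserves a worked example: when the sg length is not a multiple of the configured minor loop (maxburst * width), the loop steps the burst count down until the minor loop divides the length, and uses a single beat if nothing fits. The sketch below reproduces only that arithmetic with plain integers, not dmaengine structures.

#include <stdio.h>

static unsigned int pick_nbytes(unsigned int len, unsigned int width,
				unsigned int maxburst)
{
	unsigned int nbytes = maxburst * width;
	unsigned int j;

	if (!(len % nbytes))
		return nbytes;

	for (j = maxburst; j > 1; j--)
		if (!(len % (j * width)))
			return j * width;

	return width;	/* no suitable burst: one beat per minor loop */
}

int main(void)
{
	/* 24 % (8 * 4) != 0, but a 6-beat burst (24 bytes) divides evenly */
	printf("len=24 width=4 burst=8 -> nbytes=%u\n", pick_nbytes(24, 4, 8));
	/* only a 5-beat burst (20 bytes) works here */
	printf("len=20 width=4 burst=8 -> nbytes=%u\n", pick_nbytes(20, 4, 8));
	/* nothing larger than one beat divides 4, so fall back to the width */
	printf("len=4  width=4 burst=3 -> nbytes=%u\n", pick_nbytes(4, 4, 3));
	return 0;
}
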
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
dma_addr_t dma_dst, dma_addr_t dma_src,
@@ -606,13 +737,12 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
fsl_chan->is_sw = true;
/* To match with copy_align and max_seg_size so 1 tcd is enough */
- fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
- EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
+ fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
32, len, 0, 1, 1, 32, 0, true, true, false);
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
@@ -629,7 +759,6 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
fsl_chan->status = DMA_IN_PROGRESS;
fsl_chan->idle = false;
}
-EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
void fsl_edma_issue_pending(struct dma_chan *chan)
{
@@ -649,7 +778,6 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -660,7 +788,6 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
32, 0);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
@@ -683,7 +810,6 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
}
-EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
@@ -695,12 +821,10 @@ void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
tasklet_kill(&chan->vchan.task);
}
}
-EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
/*
- * On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
- * register offsets are different compared to ColdFire mcf5441x 64 channels
- * edma (here called "v2").
+ * On the 32-channel Vybrid/mpc577x eDMA version, register offsets are
+ * different from the 64-channel ColdFire mcf5441x eDMA.
*
* This function sets up register offsets as per proper declared version
* so must be called in xxx_edma_probe() just after setting the
@@ -708,41 +832,30 @@ EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
*/
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
+ bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);
+
edma->regs.cr = edma->membase + EDMA_CR;
edma->regs.es = edma->membase + EDMA_ES;
edma->regs.erql = edma->membase + EDMA_ERQ;
edma->regs.eeil = edma->membase + EDMA_EEI;
- edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SERQ : EDMA_SERQ);
- edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CERQ : EDMA_CERQ);
- edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SEEI : EDMA_SEEI);
- edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CEEI : EDMA_CEEI);
- edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CINT : EDMA_CINT);
- edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CERR : EDMA_CERR);
- edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SSRT : EDMA_SSRT);
- edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CDNE : EDMA_CDNE);
- edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_INTL : EDMA_INTR);
- edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_ERRL : EDMA_ERR);
-
- if (edma->drvdata->version == v2) {
+ edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
+ edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
+ edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
+ edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
+ edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
+ edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
+ edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
+ edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
+ edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
+ edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);
+
+ if (is64) {
edma->regs.erqh = edma->membase + EDMA64_ERQH;
edma->regs.eeih = edma->membase + EDMA64_EEIH;
edma->regs.errh = edma->membase + EDMA64_ERRH;
edma->regs.inth = edma->membase + EDMA64_INTH;
}
-
- edma->regs.tcd = edma->membase + EDMA_TCD;
}
-EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 004ec4a6bc86..3cc0cc8fc2d0 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -29,16 +29,6 @@
#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
-#define EDMA_TCD_ATTR_DSIZE_8BIT 0
-#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
-#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
-#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
-#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0))
-#define EDMA_TCD_ATTR_SSIZE_8BIT 0
-#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
-#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
-#define EDMA_TCD_ATTR_SSIZE_64BIT (EDMA_TCD_ATTR_DSIZE_64BIT << 8)
-#define EDMA_TCD_ATTR_SSIZE_32BYTE (EDMA_TCD_ATTR_DSIZE_32BYTE << 8)
#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
@@ -52,16 +42,32 @@
#define EDMA_TCD_CSR_ACTIVE BIT(6)
#define EDMA_TCD_CSR_DONE BIT(7)
+#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x) ((x) & GENMASK(9, 0))
+#define EDMA_V3_TCD_NBYTES_MLOFF(x) (x << 10)
+#define EDMA_V3_TCD_NBYTES_DMLOE (1 << 30)
+#define EDMA_V3_TCD_NBYTES_SMLOE (1 << 31)
+
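
For orientation, the new v3 NBYTES word packs three things: the minor-loop byte count in bits 9:0, a signed minor-loop address offset starting at bit 10, and the SMLOE/DMLOE bits (31/30) that apply the offset on the source or destination side. The residue code earlier in this patch strips the offset back out with EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(). A small pack/unpack sketch follows; the 20-bit offset width and the example values are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

#define NBYTES_MLOFF_NBYTES(x)	((x) & 0x3ffu)		/* bits 9:0 */
#define NBYTES_MLOFF(x)		(((uint32_t)(x) & 0xfffffu) << 10)
#define NBYTES_DMLOE		(1u << 30)
#define NBYTES_SMLOE		(1u << 31)

int main(void)
{
	uint32_t burst = 4, count = 64;
	/* mem-to-dev with a multi-FIFO target: rewind the destination */
	uint32_t nbytes = count | NBYTES_MLOFF(-(int32_t)(burst * 4)) | NBYTES_DMLOE;

	printf("packed nbytes    = 0x%08x\n", (unsigned)nbytes);
	/* the residue path ignores the offset and recovers the raw count */
	if (nbytes & (NBYTES_DMLOE | NBYTES_SMLOE))
		printf("minor loop bytes = %u\n", (unsigned)NBYTES_MLOFF_NBYTES(nbytes));
	return 0;
}
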
#define EDMAMUX_CHCFG_DIS 0x0
#define EDMAMUX_CHCFG_ENBL 0x80
#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
#define DMAMUX_NR 2
+#define EDMA_TCD 0x1000
+
#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+#define EDMA_V3_CH_SBR_RD BIT(22)
+#define EDMA_V3_CH_SBR_WR BIT(21)
+#define EDMA_V3_CH_CSR_ERQ BIT(0)
+#define EDMA_V3_CH_CSR_EARQ BIT(1)
+#define EDMA_V3_CH_CSR_EEI BIT(2)
+#define EDMA_V3_CH_CSR_DONE BIT(30)
+#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
+
enum fsl_edma_pm_state {
RUNNING = 0,
SUSPENDED,
@@ -81,6 +87,18 @@ struct fsl_edma_hw_tcd {
__le16 biter;
};
+struct fsl_edma3_ch_reg {
+ __le32 ch_csr;
+ __le32 ch_es;
+ __le32 ch_int;
+ __le32 ch_sbr;
+ __le32 ch_pri;
+ __le32 ch_mux;
+ __le32 ch_mattr; /* edma4, reserved for edma3 */
+ __le32 ch_reserved;
+ struct fsl_edma_hw_tcd tcd;
+} __packed;
+
/*
* These are iomem pointers, for both v32 and v64.
*/
@@ -103,7 +121,6 @@ struct edma_regs {
void __iomem *intl;
void __iomem *errh;
void __iomem *errl;
- struct fsl_edma_hw_tcd __iomem *tcd;
};
struct fsl_edma_sw_tcd {
@@ -126,7 +143,20 @@ struct fsl_edma_chan {
dma_addr_t dma_dev_addr;
u32 dma_dev_size;
enum dma_data_direction dma_dir;
- char chan_name[16];
+ char chan_name[32];
+ struct fsl_edma_hw_tcd __iomem *tcd;
+ u32 real_count;
+ struct work_struct issue_worker;
+ struct platform_device *pdev;
+ struct device *pd_dev;
+ u32 srcid;
+ struct clk *clk;
+ int priority;
+ int hw_chanid;
+ int txirq;
+ bool is_rxchan;
+ bool is_remote;
+ bool is_multi_fifo;
};
struct fsl_edma_desc {
@@ -138,17 +168,32 @@ struct fsl_edma_desc {
struct fsl_edma_sw_tcd tcd[];
};
-enum edma_version {
- v1, /* 32ch, Vybrid, mpc57x, etc */
- v2, /* 64ch Coldfire */
- v3, /* 32ch, i.mx7ulp */
-};
+#define FSL_EDMA_DRV_HAS_DMACLK BIT(0)
+#define FSL_EDMA_DRV_MUX_SWAP BIT(1)
+#define FSL_EDMA_DRV_CONFIG32 BIT(2)
+#define FSL_EDMA_DRV_WRAP_IO BIT(3)
+#define FSL_EDMA_DRV_EDMA64 BIT(4)
+#define FSL_EDMA_DRV_HAS_PD BIT(5)
+#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
+#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
+/* imx8 QM audio edma remote local swapped */
+#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
+/* control and status register is in tcd address space, edma3 reg layout */
+#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
+#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
+#define FSL_EDMA_DRV_DEV_TO_DEV BIT(11)
+#define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12)
+
+#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
+ FSL_EDMA_DRV_BUS_8BYTE | \
+ FSL_EDMA_DRV_DEV_TO_DEV | \
+ FSL_EDMA_DRV_ALIGN_64BYTE)
struct fsl_edma_drvdata {
- enum edma_version version;
- u32 dmamuxs;
- bool has_dmaclk;
- bool mux_swap;
+ u32 dmamuxs; /* only used before v3 */
+ u32 chreg_off;
+ u32 chreg_space_sz;
+ u32 flags;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
@@ -159,6 +204,7 @@ struct fsl_edma_engine {
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
struct clk *dmaclk;
+ struct clk *chclk;
struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
@@ -166,9 +212,28 @@ struct fsl_edma_engine {
int errirq;
bool big_endian;
struct edma_regs regs;
+ u64 chan_masked;
struct fsl_edma_chan chans[];
};
+#define edma_read_tcdreg(chan, __name) \
+(sizeof(chan->tcd->__name) == sizeof(u32) ? \
+ edma_readl(chan->edma, &chan->tcd->__name) : \
+ edma_readw(chan->edma, &chan->tcd->__name))
+
+#define edma_write_tcdreg(chan, val, __name) \
+(sizeof(chan->tcd->__name) == sizeof(u32) ? \
+ edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) : \
+ edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))
+
+#define edma_readl_chreg(chan, __name) \
+ edma_readl(chan->edma, \
+ (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+
+#define edma_writel_chreg(chan, val, __name) \
+ edma_writel(chan->edma, val, \
+ (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+
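
The edma_readl_chreg()/edma_writel_chreg() macros above lean on container_of(): the channel only stores a pointer to its TCD, and the surrounding v3 per-channel register block is recovered by subtracting the member offset. A plain userspace illustration of that recovery, with a simplified container_of() and ordinary structs instead of MMIO:

#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hw_tcd { uint32_t saddr, daddr; };

struct ch_reg {			/* cut-down stand-in for fsl_edma3_ch_reg */
	uint32_t ch_csr;
	uint32_t ch_sbr;
	uint32_t ch_mux;
	struct hw_tcd tcd;
};

int main(void)
{
	struct ch_reg reg = { .ch_csr = 0x1, .ch_mux = 0x2a };
	struct hw_tcd *tcd = &reg.tcd;	/* what the channel keeps around */
	struct ch_reg *back = container_of(tcd, struct ch_reg, tcd);

	printf("ch_csr=0x%x ch_mux=0x%x\n",
	       (unsigned)back->ch_csr, (unsigned)back->ch_mux);
	return 0;
}
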
/*
* R/W functions for big- or little-endian registers:
* The eDMA controller's endian is independent of the CPU core's endian.
@@ -183,6 +248,14 @@ static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
return ioread32(addr);
}
+static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread16be(addr);
+ else
+ return ioread16(addr);
+}
+
static inline void edma_writeb(struct fsl_edma_engine *edma,
u8 val, void __iomem *addr)
{
@@ -217,11 +290,23 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
+static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
+{
+ return fsl_chan->edma->drvdata->flags;
+}
+
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct fsl_edma_desc, vdesc);
}
+static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
+{
+ fsl_chan->status = DMA_ERROR;
+ fsl_chan->idle = true;
+}
+
+void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable);
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma-main.c
index e40769666e39..63d48d046f04 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -18,9 +18,15 @@
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include "fsl-edma-common.h"
+#define ARGS_RX BIT(0)
+#define ARGS_REMOTE BIT(1)
+#define ARGS_MULTI_FIFO BIT(2)
+
static void fsl_edma_synchronize(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
@@ -33,7 +39,6 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int intr, ch;
struct edma_regs *regs = &fsl_edma->regs;
- struct fsl_edma_chan *fsl_chan;
intr = edma_readl(fsl_edma, regs->intl);
if (!intr)
@@ -42,33 +47,25 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (intr & (0x1 << ch)) {
edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
+ fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
+ }
+ }
+ return IRQ_HANDLED;
+}
- fsl_chan = &fsl_edma->chans[ch];
-
- spin_lock(&fsl_chan->vchan.lock);
+static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_chan *fsl_chan = dev_id;
+ unsigned int intr;
- if (!fsl_chan->edesc) {
- /* terminate_all called before */
- spin_unlock(&fsl_chan->vchan.lock);
- continue;
- }
+ intr = edma_readl_chreg(fsl_chan, ch_int);
+ if (!intr)
+ return IRQ_HANDLED;
- if (!fsl_chan->edesc->iscyclic) {
- list_del(&fsl_chan->edesc->vdesc.node);
- vchan_cookie_complete(&fsl_chan->edesc->vdesc);
- fsl_chan->edesc = NULL;
- fsl_chan->status = DMA_COMPLETE;
- fsl_chan->idle = true;
- } else {
- vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
- }
+ edma_writel_chreg(fsl_chan, 1, ch_int);
- if (!fsl_chan->edesc)
- fsl_edma_xfer_desc(fsl_chan);
+ fsl_edma_tx_chan_handler(fsl_chan);
- spin_unlock(&fsl_chan->vchan.lock);
- }
- }
return IRQ_HANDLED;
}
@@ -86,8 +83,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
if (err & (0x1 << ch)) {
fsl_edma_disable_request(&fsl_edma->chans[ch]);
edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
- fsl_edma->chans[ch].status = DMA_ERROR;
- fsl_edma->chans[ch].idle = true;
+ fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
}
}
return IRQ_HANDLED;
@@ -134,11 +130,58 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
+static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
+ struct dma_chan *chan, *_chan;
+ struct fsl_edma_chan *fsl_chan;
+ bool b_chmux;
+ int i;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
+
+ mutex_lock(&fsl_edma->fsl_edma_mutex);
+ list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
+ device_node) {
+
+ if (chan->client_count)
+ continue;
+
+ fsl_chan = to_fsl_edma_chan(chan);
+ i = fsl_chan - fsl_edma->chans;
+
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
+ fsl_chan->priority = dma_spec->args[1];
+ fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
+ fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
+ fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
+
+ if (!b_chmux && i == dma_spec->args[0]) {
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ } else if (b_chmux && !fsl_chan->srcid) {
+			/* if the controller supports channel mux, choose a free channel */
+ fsl_chan->srcid = dma_spec->args[0];
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ }
+ }
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return NULL;
+}
+
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
int ret;
+ edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
+
fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
if (fsl_edma->txirq < 0)
return fsl_edma->txirq;
@@ -173,6 +216,37 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}
+static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+
+ struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ /* request channel irq */
+ fsl_chan->txirq = platform_get_irq(pdev, i);
+ if (fsl_chan->txirq < 0) {
+ dev_err(&pdev->dev, "Can't get chan %d's irq.\n", i);
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
+ fsl_edma3_tx_handler, IRQF_SHARED,
+ fsl_chan->chan_name, fsl_chan);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register chan%d's IRQ.\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
fsl_edma2_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma)
@@ -180,6 +254,8 @@ fsl_edma2_irq_init(struct platform_device *pdev,
int i, ret, irq;
int count;
+ edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
+
count = platform_irq_count(pdev);
dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
if (count <= 2) {
@@ -197,8 +273,6 @@ fsl_edma2_irq_init(struct platform_device *pdev,
if (irq < 0)
return -ENXIO;
- sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);
-
/* The last IRQ is for eDMA err */
if (i == count - 1)
ret = devm_request_irq(&pdev->dev, irq,
@@ -236,33 +310,110 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
}
static struct fsl_edma_drvdata vf610_data = {
- .version = v1,
.dmamuxs = DMAMUX_NR,
+ .flags = FSL_EDMA_DRV_WRAP_IO,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};
static struct fsl_edma_drvdata ls1028a_data = {
- .version = v1,
.dmamuxs = DMAMUX_NR,
- .mux_swap = true,
+ .flags = FSL_EDMA_DRV_MUX_SWAP | FSL_EDMA_DRV_WRAP_IO,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};
static struct fsl_edma_drvdata imx7ulp_data = {
- .version = v3,
.dmamuxs = 1,
- .has_dmaclk = true,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_CONFIG32,
.setup_irq = fsl_edma2_irq_init,
};
+static struct fsl_edma_drvdata imx8qm_data = {
+ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx8qm_audio_data = {
+ .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx93_data3 = {
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx93_data4 = {
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x8000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
+ { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
+ { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
+ { .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
+ { .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ struct fsl_edma_chan *fsl_chan;
+ struct device_link *link;
+ struct device *pd_chan;
+ struct device *dev;
+ int i;
+
+ dev = &pdev->dev;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ fsl_chan = &fsl_edma->chans[i];
+
+ pd_chan = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR_OR_NULL(pd_chan)) {
+ dev_err(dev, "Failed attach pd %d\n", i);
+ return -EINVAL;
+ }
+
+ link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (IS_ERR(link)) {
+ dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
+ PTR_ERR(link));
+ return -EINVAL;
+ }
+
+ fsl_chan->pd_dev = pd_chan;
+
+ pm_runtime_use_autosuspend(fsl_chan->pd_dev);
+ pm_runtime_set_autosuspend_delay(fsl_chan->pd_dev, 200);
+ pm_runtime_set_active(fsl_chan->pd_dev);
+ }
+
+ return 0;
+}
+
static int fsl_edma_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -270,9 +421,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL;
- struct fsl_edma_chan *fsl_chan;
+ u32 chan_mask[2] = {0, 0};
struct edma_regs *regs;
- int len, chans;
+ int chans;
int ret, i;
if (of_id)
@@ -288,8 +439,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
return ret;
}
- len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
- fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
+ GFP_KERNEL);
if (!fsl_edma)
return -ENOMEM;
@@ -301,26 +452,42 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
- fsl_edma_setup_regs(fsl_edma);
- regs = &fsl_edma->regs;
+ if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) {
+ fsl_edma_setup_regs(fsl_edma);
+ regs = &fsl_edma->regs;
+ }
- if (drvdata->has_dmaclk) {
- fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_DMACLK) {
+ fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
if (IS_ERR(fsl_edma->dmaclk)) {
dev_err(&pdev->dev, "Missing DMA block clock.\n");
return PTR_ERR(fsl_edma->dmaclk);
}
+ }
- ret = clk_prepare_enable(fsl_edma->dmaclk);
- if (ret) {
- dev_err(&pdev->dev, "DMA clk block failed.\n");
- return ret;
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
+ fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
+ if (IS_ERR(fsl_edma->chclk)) {
+ dev_err(&pdev->dev, "Missing MP block clock.\n");
+ return PTR_ERR(fsl_edma->chclk);
}
}
+ ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);
+
+ if (ret > 0) {
+ fsl_edma->chan_masked = chan_mask[1];
+ fsl_edma->chan_masked <<= 32;
+ fsl_edma->chan_masked |= chan_mask[0];
+ }
+
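
The dma-channel-mask handling above reads one or two 32-bit cells and folds them into a 64-bit mask, with the second cell as the high word. A tiny numeric example of the same assembly (the cell values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t chan_mask[2] = { 0x000000f0, 0x00000001 };	/* low, high */
	uint64_t chan_masked;

	chan_masked = chan_mask[1];
	chan_masked <<= 32;
	chan_masked |= chan_mask[0];

	/* channels 4-7 and 32 end up masked; probe skips them via BIT(i) */
	printf("chan_masked = 0x%016llx\n", (unsigned long long)chan_masked);
	return 0;
}
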
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
+		/* eDMAv3 mux registers move to the TCD area if ch_mux exists */
+ if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
+ break;
+
fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
@@ -330,26 +497,32 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
sprintf(clkname, "dmamux%d", i);
- fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
+ fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
if (IS_ERR(fsl_edma->muxclk[i])) {
dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
/* on error: disable all previously enabled clks */
- fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxclk[i]);
}
-
- ret = clk_prepare_enable(fsl_edma->muxclk[i]);
- if (ret)
- /* on error: disable all previously enabled clks */
- fsl_disable_clocks(fsl_edma, i);
-
}
fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_PD) {
+ ret = fsl_edma3_attach_pd(pdev, fsl_edma);
+ if (ret)
+ return ret;
+ }
+
INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
for (i = 0; i < fsl_edma->n_chans; i++) {
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+ int len;
+
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
+ dev_name(&pdev->dev), i);
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
@@ -357,13 +530,19 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->idle = true;
fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
+
+ len = (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) ?
+ offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
+ fsl_chan->tcd = fsl_edma->membase
+ + i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
+
+ fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
- edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
- edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
if (ret)
return ret;
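
The per-channel TCD pointer set up in the probe loop above is worth a worked example. The legacy layout packs 32-byte TCDs back to back at EDMA_TCD (0x1000); the split-register eDMA3 layout gives each channel its own chreg_space_sz block at chreg_off, with the TCD a fixed distance into it (0x20 here, assuming the fsl_edma3_ch_reg layout in the header). The base address and channel numbers below are made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t membase = 0x22000000;	/* made-up controller base */
	unsigned int i;

	/* vf610-style: chreg_off = 0x1000, chreg_space_sz = 32, len = 0 */
	for (i = 0; i < 2; i++)
		printf("legacy ch%u tcd @ 0x%llx\n", i,
		       (unsigned long long)(membase + 0x1000 + i * 32));

	/* imx8qm-style: chreg_off = chreg_space_sz = 0x10000, len = 0x20 */
	for (i = 0; i < 2; i++)
		printf("edma3  ch%u tcd @ 0x%llx\n", i,
		       (unsigned long long)(membase + 0x10000 + i * 0x10000 + 0x20));
	return 0;
}
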
@@ -391,33 +570,47 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+
+ if (drvdata->flags & FSL_EDMA_DRV_BUS_8BYTE) {
+ fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ }
+
fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ if (drvdata->flags & FSL_EDMA_DRV_DEV_TO_DEV)
+ fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);
+
+ fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
+ DMAENGINE_ALIGN_64_BYTES :
+ DMAENGINE_ALIGN_32_BYTES;
- fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES;
/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+ fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
platform_set_drvdata(pdev, fsl_edma);
ret = dma_async_device_register(&fsl_edma->dma_dev);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
- fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}
- ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
+ ret = of_dma_controller_register(np,
+ drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
+ fsl_edma);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}
/* enable round robin arbitration */
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+ if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
@@ -470,7 +663,7 @@ static int fsl_edma_resume_early(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
fsl_chan->pm_state = RUNNING;
- edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index eddb2688f234..a8cc8a4bc610 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -13,10 +13,10 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include "virt-dma.h"
#include "fsldma.h"
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index fdf3500d96a9..0b9ca93ce3dc 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -60,9 +60,10 @@
*/
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f8459cc5315d..ddcf736d283d 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -28,9 +28,10 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 9a15f0d12c79..22d6f4e455b7 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -299,21 +299,6 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
}
}
-static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
-{
- struct idxd_device *idxd = wq->idxd;
- union wqcfg wqcfg;
- unsigned int offset;
-
- offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
- spin_lock(&idxd->dev_lock);
- wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
- wqcfg.priv = priv;
- wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
- iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
- spin_unlock(&idxd->dev_lock);
-}
-
static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
struct idxd_device *idxd = wq->idxd;
@@ -784,8 +769,6 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- memset(addr, 0, size);
-
spin_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
@@ -1421,15 +1404,14 @@ int drv_enable_wq(struct idxd_wq *wq)
}
/*
- * In the event that the WQ is configurable for pasid and priv bits.
- * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
- * However, for non-kernel wq, the driver should only set the pasid_en bit for
- * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
+ * In the event that the WQ is configurable for pasid, the driver
+	 * should set up the pasid and pasid_en bits. This is true for both
+	 * kernel and user shared workqueues. There is no need to set up the
+	 * priv bit, since in-kernel DMA also issues user-privileged requests.
+ * A dedicated wq that is not 'kernel' type will configure pasid and
* pasid_en later on so there is no need to setup.
*/
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
- int priv = 0;
-
if (wq_pasid_enabled(wq)) {
if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
@@ -1437,10 +1419,6 @@ int drv_enable_wq(struct idxd_wq *wq)
__idxd_wq_set_pasid_locked(wq, pasid);
}
}
-
- if (is_idxd_wq_kernel(wq))
- priv = 1;
- __idxd_wq_set_priv_locked(wq, priv);
}
rc = 0;
@@ -1548,6 +1526,15 @@ int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
if (rc < 0)
return -ENXIO;
+ /*
+ * System PASID is preserved across device disable/enable cycle, but
+ * genconfig register content gets cleared during device reset. We
+ * need to re-enable user interrupts for kernel work queue completion
+ * IRQ to function.
+ */
+ if (idxd->pasid != IOMMU_PASID_INVALID)
+ idxd_set_user_intr(idxd, 1);
+
rc = idxd_device_evl_setup(idxd);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index eb35ca313684..07623fb0f52f 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -75,9 +75,10 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->xfer_size = len;
/*
* For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
- * field instead. This field should be set to 1 for kernel descriptors.
+ * field instead. This field should be set to 0 for kernel descriptors
+ * since kernel DMA on VT-d supports "user" privilege only.
*/
- hw->priv = 1;
+ hw->priv = 0;
hw->completion_addr = compl;
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 5428a2e1b1ec..e269ca1f4862 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -473,6 +473,15 @@ static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
return container_of(ie, struct idxd_device, ie);
}
+static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
+{
+ union gencfg_reg reg;
+
+ reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ reg.user_int_en = enable;
+ iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+}
+
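
idxd_set_user_intr() above is a classic read-modify-write of a register exposed as a union of named bitfields over a raw 32-bit word. The userspace sketch below shows the shape of that pattern; the field layout and the backing value are invented for illustration and do not reflect the real GENCFG register.

#include <stdio.h>
#include <stdint.h>

union fake_gencfg {
	struct {
		uint32_t rdbuf_limit:8;
		uint32_t rsvd:4;
		uint32_t user_int_en:1;
		uint32_t rsvd2:19;
	};
	uint32_t bits;
};

static uint32_t fake_mmio = 0x00000042;		/* pretend register backing */

int main(void)
{
	union fake_gencfg reg;

	reg.bits = fake_mmio;			/* ioread32() stand-in */
	reg.user_int_en = 1;			/* flip just one field */
	fake_mmio = reg.bits;			/* iowrite32() stand-in */

	printf("gencfg: 0x%08x\n", (unsigned)fake_mmio);
	return 0;
}
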
extern struct bus_type dsa_bus_type;
extern bool support_enqcmd;
@@ -651,8 +660,6 @@ int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
-int idxd_register_driver(void);
-void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
@@ -664,8 +671,6 @@ void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
-int idxd_register_idxd_drv(void);
-void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
@@ -710,7 +715,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
-void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type, bool free_desc);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 1aa823974cda..0eb1c827a215 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -550,14 +550,59 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
- return -EOPNOTSUPP;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iommu_domain *domain;
+ ioasid_t pasid;
+ int ret;
+
+ /*
+ * Attach a global PASID to the DMA domain so that we can use ENQCMDS
+ * to submit work on buffers mapped by DMA API.
+ */
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain)
+ return -EPERM;
+
+ pasid = iommu_alloc_global_pasid(dev);
+ if (pasid == IOMMU_PASID_INVALID)
+ return -ENOSPC;
+
+ /*
+ * DMA domain is owned by the driver, it should support all valid
+ * types such as DMA-FQ, identity, etc.
+ */
+ ret = iommu_attach_device_pasid(domain, dev, pasid);
+ if (ret) {
+ dev_err(dev, "failed to attach device pasid %d, domain type %d",
+ pasid, domain->type);
+ iommu_free_global_pasid(pasid);
+ return ret;
+ }
+
+ /* Since we set user privilege for kernel DMA, enable completion IRQ */
+ idxd_set_user_intr(idxd, 1);
+ idxd->pasid = pasid;
+
+ return ret;
}
static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain)
+ return;
+
+ iommu_detach_device_pasid(domain, dev, idxd->pasid);
+ iommu_free_global_pasid(idxd->pasid);
- iommu_sva_unbind_device(idxd->sva);
+ idxd_set_user_intr(idxd, 0);
idxd->sva = NULL;
+ idxd->pasid = IOMMU_PASID_INVALID;
}
static int idxd_enable_sva(struct pci_dev *pdev)
@@ -600,8 +645,9 @@ static int idxd_probe(struct idxd_device *idxd)
} else {
set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- if (idxd_enable_system_pasid(idxd))
- dev_warn(dev, "No in-kernel DMA with PASID.\n");
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc)
+ dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
else
set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}
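
idxd_enable_system_pasid() now allocates a global PASID and attaches it to the device's default DMA domain so the driver can submit work against DMA-API mappings; the disable path has to undo those steps in reverse order. A condensed sketch of that pairing, using the IOMMU calls from the hunk above (driver-specific bookkeeping trimmed):

#include <linux/errno.h>
#include <linux/iommu.h>

/* Sketch: attach a driver-owned global PASID to the default DMA domain. */
static int demo_enable_pasid(struct device *dev, ioasid_t *out_pasid)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	ioasid_t pasid;
	int ret;

	if (!domain)
		return -EPERM;

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret) {
		iommu_free_global_pasid(pasid);	/* undo the allocation on failure */
		return ret;
	}

	*out_pasid = pasid;
	return 0;
}

/* Teardown mirrors the enable path: detach first, then free the PASID. */
static void demo_disable_pasid(struct device *dev, ioasid_t pasid)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		return;

	iommu_detach_device_pasid(domain, dev, pasid);
	iommu_free_global_pasid(pasid);
}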
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index d73004f47cf4..fdda6d604262 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -245,12 +245,11 @@ static void perfmon_pmu_event_update(struct perf_event *event)
int shift = 64 - idxd->idxd_pmu->counter_width;
struct hw_perf_event *hwc = &event->hw;
+ prev_raw_count = local64_read(&hwc->prev_count);
do {
- prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = perfmon_pmu_read_counter(event);
- } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count) != prev_raw_count);
-
+ } while (!local64_try_cmpxchg(&hwc->prev_count,
+ &prev_raw_count, new_raw_count));
n = (new_raw_count << shift);
p = (prev_raw_count << shift);
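
The perfmon hunk switches the counter-update loop to local64_try_cmpxchg(), which reloads prev_raw_count on failure and drops the redundant re-read inside the loop. A standalone sketch of the full update pattern, including the delta accumulation that follows the loop; the counter-read helper is a stand-in, not the idxd one:

#include <linux/perf_event.h>

/* Hypothetical counter read; a real driver would issue an MMIO/MSR read here. */
static u64 demo_read_counter(struct perf_event *event)
{
	return 0;
}

/*
 * Fold the newest hardware counter value into event->count without losing
 * increments that race with this update. 'shift' discards the bits above
 * the hardware counter width before the delta is accumulated.
 */
static void demo_pmu_event_update(struct perf_event *event, int shift)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count, delta;

	prev_raw_count = local64_read(&hwc->prev_count);
	do {
		new_raw_count = demo_read_counter(event);
		/* On failure, prev_raw_count is reloaded with the current value. */
	} while (!local64_try_cmpxchg(&hwc->prev_count, &prev_raw_count,
				      new_raw_count));

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;
	local64_add(delta, &event->count);
}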
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 293739ac5596..7caba90d85b3 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -948,13 +948,6 @@ static ssize_t wq_name_store(struct device *dev,
if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
return -EINVAL;
- /*
- * This is temporarily placed here until we have SVM support for
- * dmaengine.
- */
- if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
- return -EOPNOTSUPP;
-
input = kstrndup(buf, count, GFP_KERNEL);
if (!input)
return -ENOMEM;
@@ -1095,8 +1088,8 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
- if (!idxd->hw.wq_cap.wq_ats_support)
- return -EOPNOTSUPP;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
rc = kstrtobool(buf, &ats_dis);
if (rc < 0)
@@ -1131,8 +1124,8 @@ static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
- if (!idxd->hw.wq_cap.wq_prs_support)
- return -EOPNOTSUPP;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
rc = kstrtobool(buf, &prs_dis);
if (rc < 0)
@@ -1288,12 +1281,9 @@ static struct attribute *idxd_wq_attributes[] = {
NULL,
};
-static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
- struct idxd_device *idxd)
-{
- return attr == &dev_attr_wq_op_config.attr &&
- !idxd->hw.wq_cap.op_config;
-}
+/* A WQ attr is invisible if the feature is not supported in WQCAP. */
+#define idxd_wq_attr_invisible(name, cap_field, a, idxd) \
+ ((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
struct idxd_device *idxd)
@@ -1303,13 +1293,6 @@ static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
idxd->data->type == IDXD_TYPE_IAX;
}
-static bool idxd_wq_attr_wq_prs_disable_invisible(struct attribute *attr,
- struct idxd_device *idxd)
-{
- return attr == &dev_attr_wq_prs_disable.attr &&
- !idxd->hw.wq_cap.wq_prs_support;
-}
-
static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
@@ -1317,13 +1300,16 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
- if (idxd_wq_attr_op_config_invisible(attr, idxd))
+ if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
return 0;
if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
return 0;
- if (idxd_wq_attr_wq_prs_disable_invisible(attr, idxd))
+ if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
+ return 0;
+
+ if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
return 0;
return attr->mode;
@@ -1480,7 +1466,7 @@ static ssize_t pasid_enabled_show(struct device *dev,
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
+ return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);
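
The sysfs hunks consolidate the per-attribute "hide when the capability bit is clear" helpers into one idxd_wq_attr_invisible() macro consumed by the group's .is_visible callback. A generic sketch of that pattern, with made-up attribute and capability names and the show() callbacks omitted:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical capability flags reported by the hardware. */
struct demo_caps {
	bool op_config;
	bool prs_support;
};

/* Hypothetical attributes; their show() implementations are elided. */
extern struct device_attribute dev_attr_demo_op_config;
extern struct device_attribute dev_attr_demo_prs_disable;

/* An attribute is invisible when its capability bit is clear. */
#define demo_attr_invisible(name, cap_field, a, caps) \
	((a) == &dev_attr_demo_##name.attr && !(caps)->cap_field)

static umode_t demo_attr_visible(struct kobject *kobj, struct attribute *attr,
				 int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct demo_caps *caps = dev_get_drvdata(dev);	/* assumed storage */

	if (demo_attr_invisible(op_config, op_config, attr, caps))
		return 0;				/* hide the file */
	if (demo_attr_invisible(prs_disable, prs_support, attr, caps))
		return 0;

	return attr->mode;				/* keep the default mode */
}

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_op_config.attr,
	&dev_attr_demo_prs_disable.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attr_visible,
};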
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index ad084552640f..9be0d3226e19 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -17,7 +17,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f040751690af..114f254b9f50 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -21,7 +21,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <asm/irq.h>
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7a912f90c2a9..51012bd39900 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -31,7 +31,6 @@
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 289c59ed74b9..17f6b6367113 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -51,7 +51,7 @@
/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
- return (pci->bus->number << 8) | pci->devfn;
+ return pci_dev_id(pci);
}
static int dca_enabled_in_bios(struct pci_dev *pdev)
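
The dca.c change replaces the open-coded bus/devfn packing with pci_dev_id(), which returns the same 16-bit B/D/F value. A tiny sketch of the equivalence (the helper name is hypothetical):

#include <linux/pci.h>

/* pci_dev_id() packs bus and devfn exactly like the removed expression. */
static inline u16 demo_dcaid(struct pci_dev *pdev)
{
	u16 legacy = (pdev->bus->number << 8) | pdev->devfn;

	WARN_ON_ONCE(legacy != pci_dev_id(pdev));	/* both encodings match */
	return pci_dev_id(pdev);
}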
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 35e06b382603..a180171087a8 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -74,6 +74,7 @@ struct ioatdma_device {
struct dca_provider *dca;
enum ioat_irq_mode irq_mode;
u32 cap;
+ int chancnt;
/* shadow version for CB3.3 chan reset errata workaround */
u64 msixtba0;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index c4602bfc9c74..9c364e92cb82 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -420,7 +420,7 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
msix:
/* The number of MSI-X vectors should equal the number of channels */
- msixcnt = ioat_dma->dma_dev.chancnt;
+ msixcnt = ioat_dma->chancnt;
for (i = 0; i < msixcnt; i++)
ioat_dma->msix_entries[i].entry = i;
@@ -511,7 +511,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->dev = &pdev->dev;
- if (!dma->chancnt) {
+ if (!ioat_dma->chancnt) {
dev_err(dev, "channel enumeration error\n");
goto err_setup_interrupts;
}
@@ -567,15 +567,16 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
struct device *dev = &ioat_dma->pdev->dev;
struct dma_device *dma = &ioat_dma->dma_dev;
u8 xfercap_log;
+ int chancnt;
int i;
INIT_LIST_HEAD(&dma->channels);
- dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
- dma->chancnt &= 0x1f; /* bits [4:0] valid */
- if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
+ chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+ chancnt &= 0x1f; /* bits [4:0] valid */
+ if (chancnt > ARRAY_SIZE(ioat_dma->idx)) {
dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
- dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
- dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
+ chancnt, ARRAY_SIZE(ioat_dma->idx));
+ chancnt = ARRAY_SIZE(ioat_dma->idx);
}
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
@@ -583,7 +584,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
- for (i = 0; i < dma->chancnt; i++) {
+ for (i = 0; i < chancnt; i++) {
ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan)
break;
@@ -596,7 +597,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
break;
}
}
- dma->chancnt = i;
+ ioat_dma->chancnt = i;
}
/**
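
The ioat hunks stop writing dma_device->chancnt during enumeration and keep the probed channel count in the driver-private ioatdma_device instead, on the assumption that the dmaengine core accounts registered channels in dma_device itself. A minimal sketch of that split, with hypothetical structures and register offsets:

#include <linux/io.h>
#include <linux/kernel.h>

#define DEMO_MAX_CHANNELS	32
#define DEMO_CHANCNT_OFFSET	0x00	/* assumed register offset */

/* Hypothetical driver-private device, holding its own channel count. */
struct demo_dma_device {
	void __iomem *reg_base;
	int chancnt;			/* probed count, owned by the driver */
	void *idx[DEMO_MAX_CHANNELS];
};

/* Read, mask and clamp the hardware channel count, then store it locally. */
static void demo_enumerate_channels(struct demo_dma_device *d)
{
	int chancnt = readb(d->reg_base + DEMO_CHANCNT_OFFSET) & 0x1f;

	if (chancnt > ARRAY_SIZE(d->idx))
		chancnt = ARRAY_SIZE(d->idx);

	d->chancnt = chancnt;	/* dma_device->chancnt is left to the core */
}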
diff --git a/drivers/dma/ipu/Makefile b/drivers/dma/ipu/Makefile
deleted file mode 100644
index c79ff116daf6..000000000000
--- a/drivers/dma/ipu/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-y += ipu_irq.o ipu_idmac.o
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
deleted file mode 100644
index d799b99c18bd..000000000000
--- a/drivers/dma/ipu/ipu_idmac.c
+++ /dev/null
@@ -1,1801 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/vmalloc.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/dma/ipu-dma.h>
-
-#include "../dmaengine.h"
-#include "ipu_intern.h"
-
-#define FS_VF_IN_VALID 0x00000002
-#define FS_ENC_IN_VALID 0x00000001
-
-static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
- bool wait_for_stop);
-
-/*
- * There can be only one, we could allocate it dynamically, but then we'd have
- * to add an extra parameter to some functions, and use something as ugly as
- * struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device));
- * in the ISR
- */
-static struct ipu ipu_data;
-
-#define to_ipu(id) container_of(id, struct ipu, idmac)
-
-static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg)
-{
- return __raw_readl(ipu->reg_ic + reg);
-}
-
-#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF)
-
-static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg)
-{
- __raw_writel(value, ipu->reg_ic + reg);
-}
-
-#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF)
-
-static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg)
-{
- return __raw_readl(ipu->reg_ipu + reg);
-}
-
-static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg)
-{
- __raw_writel(value, ipu->reg_ipu + reg);
-}
-
-/*****************************************************************************
- * IPU / IC common functions
- */
-static void dump_idmac_reg(struct ipu *ipu)
-{
- dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
- "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
- idmac_read_icreg(ipu, IDMAC_CONF),
- idmac_read_icreg(ipu, IC_CONF),
- idmac_read_icreg(ipu, IDMAC_CHA_EN),
- idmac_read_icreg(ipu, IDMAC_CHA_PRI),
- idmac_read_icreg(ipu, IDMAC_CHA_BUSY));
- dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
- "DB_MODE 0x%x, TASKS_STAT 0x%x\n",
- idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
- idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
- idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF),
- idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL),
- idmac_read_ipureg(ipu, IPU_TASKS_STAT));
-}
-
-static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
-{
- switch (fmt) {
- case IPU_PIX_FMT_GENERIC: /* generic data */
- case IPU_PIX_FMT_RGB332:
- case IPU_PIX_FMT_YUV420P:
- case IPU_PIX_FMT_YUV422P:
- default:
- return 1;
- case IPU_PIX_FMT_RGB565:
- case IPU_PIX_FMT_YUYV:
- case IPU_PIX_FMT_UYVY:
- return 2;
- case IPU_PIX_FMT_BGR24:
- case IPU_PIX_FMT_RGB24:
- return 3;
- case IPU_PIX_FMT_GENERIC_32: /* generic data */
- case IPU_PIX_FMT_BGR32:
- case IPU_PIX_FMT_RGB32:
- case IPU_PIX_FMT_ABGR32:
- return 4;
- }
-}
-
-/* Enable direct write to memory by the Camera Sensor Interface */
-static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
-{
- uint32_t ic_conf, mask;
-
- switch (channel) {
- case IDMAC_IC_0:
- mask = IC_CONF_PRPENC_EN;
- break;
- case IDMAC_IC_7:
- mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
- break;
- default:
- return;
- }
- ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask;
- idmac_write_icreg(ipu, ic_conf, IC_CONF);
-}
-
-/* Called under spin_lock_irqsave(&ipu_data.lock) */
-static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
-{
- uint32_t ic_conf, mask;
-
- switch (channel) {
- case IDMAC_IC_0:
- mask = IC_CONF_PRPENC_EN;
- break;
- case IDMAC_IC_7:
- mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
- break;
- default:
- return;
- }
- ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask;
- idmac_write_icreg(ipu, ic_conf, IC_CONF);
-}
-
-static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel)
-{
- uint32_t stat = TASK_STAT_IDLE;
- uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT);
-
- switch (channel) {
- case IDMAC_IC_7:
- stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >>
- TSTAT_CSI2MEM_OFFSET;
- break;
- case IDMAC_IC_0:
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- default:
- break;
- }
- return stat;
-}
-
-struct chan_param_mem_planar {
- /* Word 0 */
- u32 xv:10;
- u32 yv:10;
- u32 xb:12;
-
- u32 yb:12;
- u32 res1:2;
- u32 nsb:1;
- u32 lnpb:6;
- u32 ubo_l:11;
-
- u32 ubo_h:15;
- u32 vbo_l:17;
-
- u32 vbo_h:9;
- u32 res2:3;
- u32 fw:12;
- u32 fh_l:8;
-
- u32 fh_h:4;
- u32 res3:28;
-
- /* Word 1 */
- u32 eba0;
-
- u32 eba1;
-
- u32 bpp:3;
- u32 sl:14;
- u32 pfs:3;
- u32 bam:3;
- u32 res4:2;
- u32 npb:6;
- u32 res5:1;
-
- u32 sat:2;
- u32 res6:30;
-} __attribute__ ((packed));
-
-struct chan_param_mem_interleaved {
- /* Word 0 */
- u32 xv:10;
- u32 yv:10;
- u32 xb:12;
-
- u32 yb:12;
- u32 sce:1;
- u32 res1:1;
- u32 nsb:1;
- u32 lnpb:6;
- u32 sx:10;
- u32 sy_l:1;
-
- u32 sy_h:9;
- u32 ns:10;
- u32 sm:10;
- u32 sdx_l:3;
-
- u32 sdx_h:2;
- u32 sdy:5;
- u32 sdrx:1;
- u32 sdry:1;
- u32 sdr1:1;
- u32 res2:2;
- u32 fw:12;
- u32 fh_l:8;
-
- u32 fh_h:4;
- u32 res3:28;
-
- /* Word 1 */
- u32 eba0;
-
- u32 eba1;
-
- u32 bpp:3;
- u32 sl:14;
- u32 pfs:3;
- u32 bam:3;
- u32 res4:2;
- u32 npb:6;
- u32 res5:1;
-
- u32 sat:2;
- u32 scc:1;
- u32 ofs0:5;
- u32 ofs1:5;
- u32 ofs2:5;
- u32 ofs3:5;
- u32 wid0:3;
- u32 wid1:3;
- u32 wid2:3;
-
- u32 wid3:3;
- u32 dec_sel:1;
- u32 res6:28;
-} __attribute__ ((packed));
-
-union chan_param_mem {
- struct chan_param_mem_planar pp;
- struct chan_param_mem_interleaved ip;
-};
-
-static void ipu_ch_param_set_plane_offset(union chan_param_mem *params,
- u32 u_offset, u32 v_offset)
-{
- params->pp.ubo_l = u_offset & 0x7ff;
- params->pp.ubo_h = u_offset >> 11;
- params->pp.vbo_l = v_offset & 0x1ffff;
- params->pp.vbo_h = v_offset >> 17;
-}
-
-static void ipu_ch_param_set_size(union chan_param_mem *params,
- uint32_t pixel_fmt, uint16_t width,
- uint16_t height, uint16_t stride)
-{
- u32 u_offset;
- u32 v_offset;
-
- params->pp.fw = width - 1;
- params->pp.fh_l = height - 1;
- params->pp.fh_h = (height - 1) >> 8;
- params->pp.sl = stride - 1;
-
- switch (pixel_fmt) {
- case IPU_PIX_FMT_GENERIC:
- /*Represents 8-bit Generic data */
- params->pp.bpp = 3;
- params->pp.pfs = 7;
- params->pp.npb = 31;
- params->pp.sat = 2; /* SAT = use 32-bit access */
- break;
- case IPU_PIX_FMT_GENERIC_32:
- /*Represents 32-bit Generic data */
- params->pp.bpp = 0;
- params->pp.pfs = 7;
- params->pp.npb = 7;
- params->pp.sat = 2; /* SAT = use 32-bit access */
- break;
- case IPU_PIX_FMT_RGB565:
- params->ip.bpp = 2;
- params->ip.pfs = 4;
- params->ip.npb = 15;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 0; /* Red bit offset */
- params->ip.ofs1 = 5; /* Green bit offset */
- params->ip.ofs2 = 11; /* Blue bit offset */
- params->ip.ofs3 = 16; /* Alpha bit offset */
- params->ip.wid0 = 4; /* Red bit width - 1 */
- params->ip.wid1 = 5; /* Green bit width - 1 */
- params->ip.wid2 = 4; /* Blue bit width - 1 */
- break;
- case IPU_PIX_FMT_BGR24:
- params->ip.bpp = 1; /* 24 BPP & RGB PFS */
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 0; /* Red bit offset */
- params->ip.ofs1 = 8; /* Green bit offset */
- params->ip.ofs2 = 16; /* Blue bit offset */
- params->ip.ofs3 = 24; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- break;
- case IPU_PIX_FMT_RGB24:
- params->ip.bpp = 1; /* 24 BPP & RGB PFS */
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 16; /* Red bit offset */
- params->ip.ofs1 = 8; /* Green bit offset */
- params->ip.ofs2 = 0; /* Blue bit offset */
- params->ip.ofs3 = 24; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- break;
- case IPU_PIX_FMT_BGRA32:
- case IPU_PIX_FMT_BGR32:
- case IPU_PIX_FMT_ABGR32:
- params->ip.bpp = 0;
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 8; /* Red bit offset */
- params->ip.ofs1 = 16; /* Green bit offset */
- params->ip.ofs2 = 24; /* Blue bit offset */
- params->ip.ofs3 = 0; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- params->ip.wid3 = 7; /* Alpha bit width - 1 */
- break;
- case IPU_PIX_FMT_RGBA32:
- case IPU_PIX_FMT_RGB32:
- params->ip.bpp = 0;
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 24; /* Red bit offset */
- params->ip.ofs1 = 16; /* Green bit offset */
- params->ip.ofs2 = 8; /* Blue bit offset */
- params->ip.ofs3 = 0; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- params->ip.wid3 = 7; /* Alpha bit width - 1 */
- break;
- case IPU_PIX_FMT_UYVY:
- params->ip.bpp = 2;
- params->ip.pfs = 6;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- break;
- case IPU_PIX_FMT_YUV420P2:
- case IPU_PIX_FMT_YUV420P:
- params->ip.bpp = 3;
- params->ip.pfs = 3;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- u_offset = stride * height;
- v_offset = u_offset + u_offset / 4;
- ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
- break;
- case IPU_PIX_FMT_YVU422P:
- params->ip.bpp = 3;
- params->ip.pfs = 2;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- v_offset = stride * height;
- u_offset = v_offset + v_offset / 2;
- ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
- break;
- case IPU_PIX_FMT_YUV422P:
- params->ip.bpp = 3;
- params->ip.pfs = 2;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- u_offset = stride * height;
- v_offset = u_offset + u_offset / 2;
- ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
- break;
- default:
- dev_err(ipu_data.dev,
- "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
- break;
- }
-
- params->pp.nsb = 1;
-}
-
-static void ipu_ch_param_set_buffer(union chan_param_mem *params,
- dma_addr_t buf0, dma_addr_t buf1)
-{
- params->pp.eba0 = buf0;
- params->pp.eba1 = buf1;
-}
-
-static void ipu_ch_param_set_rotation(union chan_param_mem *params,
- enum ipu_rotate_mode rotate)
-{
- params->pp.bam = rotate;
-}
-
-static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
- uint32_t num_words)
-{
- for (; num_words > 0; num_words--) {
- dev_dbg(ipu_data.dev,
- "write param mem - addr = 0x%08X, data = 0x%08X\n",
- addr, *data);
- idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR);
- idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA);
- addr++;
- if ((addr & 0x7) == 5) {
- addr &= ~0x7; /* set to word 0 */
- addr += 8; /* increment to next row */
- }
- }
-}
-
-static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size,
- uint32_t *resize_coeff,
- uint32_t *downsize_coeff)
-{
- uint32_t temp_size;
- uint32_t temp_downsize;
-
- *resize_coeff = 1 << 13;
- *downsize_coeff = 1 << 13;
-
- /* Cannot downsize more than 8:1 */
- if (out_size << 3 < in_size)
- return -EINVAL;
-
- /* compute downsizing coefficient */
- temp_downsize = 0;
- temp_size = in_size;
- while (temp_size >= out_size * 2 && temp_downsize < 2) {
- temp_size >>= 1;
- temp_downsize++;
- }
- *downsize_coeff = temp_downsize;
-
- /*
- * compute resizing coefficient using the following formula:
- * resize_coeff = M*(SI -1)/(SO - 1)
- * where M = 2^13, SI - input size, SO - output size
- */
- *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
- if (*resize_coeff >= 16384L) {
- dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n");
- *resize_coeff = 0x3FFF;
- }
-
- dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, "
- "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
- *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
- ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);
-
- return 0;
-}
-
-static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt)
-{
- switch (fmt) {
- case IPU_PIX_FMT_RGB565:
- case IPU_PIX_FMT_BGR24:
- case IPU_PIX_FMT_RGB24:
- case IPU_PIX_FMT_BGR32:
- case IPU_PIX_FMT_RGB32:
- return IPU_COLORSPACE_RGB;
- default:
- return IPU_COLORSPACE_YCBCR;
- }
-}
-
-static int ipu_ic_init_prpenc(struct ipu *ipu,
- union ipu_channel_param *params, bool src_is_csi)
-{
- uint32_t reg, ic_conf;
- uint32_t downsize_coeff, resize_coeff;
- enum ipu_color_space in_fmt, out_fmt;
-
- /* Setup vertical resizing */
- calc_resize_coeffs(params->video.in_height,
- params->video.out_height,
- &resize_coeff, &downsize_coeff);
- reg = (downsize_coeff << 30) | (resize_coeff << 16);
-
- /* Setup horizontal resizing */
- calc_resize_coeffs(params->video.in_width,
- params->video.out_width,
- &resize_coeff, &downsize_coeff);
- reg |= (downsize_coeff << 14) | resize_coeff;
-
- /* Setup color space conversion */
- in_fmt = format_to_colorspace(params->video.in_pixel_fmt);
- out_fmt = format_to_colorspace(params->video.out_pixel_fmt);
-
- /*
- * Colourspace conversion unsupported yet - see _init_csc() in
- * Freescale sources
- */
- if (in_fmt != out_fmt) {
- dev_err(ipu->dev, "Colourspace conversion unsupported!\n");
- return -EOPNOTSUPP;
- }
-
- idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC);
-
- ic_conf = idmac_read_icreg(ipu, IC_CONF);
-
- if (src_is_csi)
- ic_conf &= ~IC_CONF_RWS_EN;
- else
- ic_conf |= IC_CONF_RWS_EN;
-
- idmac_write_icreg(ipu, ic_conf, IC_CONF);
-
- return 0;
-}
-
-static uint32_t dma_param_addr(uint32_t dma_ch)
-{
- /* Channel Parameter Memory */
- return 0x10000 | (dma_ch << 4);
-}
-
-static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
- bool prio)
-{
- u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI);
-
- if (prio)
- reg |= 1UL << channel;
- else
- reg &= ~(1UL << channel);
-
- idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);
-
- dump_idmac_reg(ipu);
-}
-
-static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
-{
- uint32_t mask;
-
- switch (channel) {
- case IDMAC_IC_0:
- case IDMAC_IC_7:
- mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
- break;
- default:
- mask = 0;
- break;
- }
-
- return mask;
-}
-
-/**
- * ipu_enable_channel() - enable an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: IDMAC channel.
- * @return: 0 on success or negative error code on failure.
- */
-static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
-{
- struct ipu *ipu = to_ipu(idmac);
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- uint32_t reg;
- unsigned long flags;
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- /* Reset to buffer 0 */
- idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
- ichan->active_buffer = 0;
- ichan->status = IPU_CHANNEL_ENABLED;
-
- switch (channel) {
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- case IDMAC_IC_7:
- ipu_channel_set_priority(ipu, channel, true);
- break;
- default:
- break;
- }
-
- reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
-
- idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);
-
- ipu_ic_enable_task(ipu, channel);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
- return 0;
-}
-
-/**
- * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
- * @ichan: IDMAC channel.
- * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
- * @width: width of buffer in pixels.
- * @height: height of buffer in pixels.
- * @stride: stride length of buffer in pixels.
- * @rot_mode: rotation mode of buffer. A rotation setting other than
- * IPU_ROTATE_VERT_FLIP should only be used for input buffers of
- * rotation channels.
- * @phyaddr_0: buffer 0 physical address.
- * @phyaddr_1: buffer 1 physical address. Setting this to a value other than
- * NULL enables double buffering mode.
- * @return: 0 on success or negative error code on failure.
- */
-static int ipu_init_channel_buffer(struct idmac_channel *ichan,
- enum pixel_fmt pixel_fmt,
- uint16_t width, uint16_t height,
- uint32_t stride,
- enum ipu_rotate_mode rot_mode,
- dma_addr_t phyaddr_0, dma_addr_t phyaddr_1)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- struct idmac *idmac = to_idmac(ichan->dma_chan.device);
- struct ipu *ipu = to_ipu(idmac);
- union chan_param_mem params = {};
- unsigned long flags;
- uint32_t reg;
- uint32_t stride_bytes;
-
- stride_bytes = stride * bytes_per_pixel(pixel_fmt);
-
- if (stride_bytes % 4) {
- dev_err(ipu->dev,
- "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
- stride, stride_bytes);
- return -EINVAL;
- }
-
- /* IC channel's stride must be a multiple of 8 pixels */
- if ((channel <= IDMAC_IC_13) && (stride % 8)) {
- dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
- return -EINVAL;
- }
-
- /* Build parameter memory data for DMA channel */
- ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
- ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
- ipu_ch_param_set_rotation(&params, rot_mode);
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);
-
- reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
-
- if (phyaddr_1)
- reg |= 1UL << channel;
- else
- reg &= ~(1UL << channel);
-
- idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL);
-
- ichan->status = IPU_CHANNEL_READY;
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- return 0;
-}
-
-/**
- * ipu_select_buffer() - mark a channel's buffer as ready.
- * @channel: channel ID.
- * @buffer_n: buffer number to mark ready.
- */
-static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
-{
- /* No locking - this is a write-one-to-set register, cleared by IPU */
- if (buffer_n == 0)
- /* Mark buffer 0 as ready. */
- idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY);
- else
- /* Mark buffer 1 as ready. */
- idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY);
-}
-
-/**
- * ipu_update_channel_buffer() - update physical address of a channel buffer.
- * @ichan: IDMAC channel.
- * @buffer_n: buffer number to update.
- * 0 or 1 are the only valid values.
- * @phyaddr: buffer physical address.
- */
-/* Called under spin_lock(_irqsave)(&ichan->lock) */
-static void ipu_update_channel_buffer(struct idmac_channel *ichan,
- int buffer_n, dma_addr_t phyaddr)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- uint32_t reg;
- unsigned long flags;
-
- spin_lock_irqsave(&ipu_data.lock, flags);
-
- if (buffer_n == 0) {
- reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
- if (reg & (1UL << channel)) {
- ipu_ic_disable_task(&ipu_data, channel);
- ichan->status = IPU_CHANNEL_READY;
- }
-
- /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
- idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
- 0x0008UL, IPU_IMA_ADDR);
- idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
- } else {
- reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
- if (reg & (1UL << channel)) {
- ipu_ic_disable_task(&ipu_data, channel);
- ichan->status = IPU_CHANNEL_READY;
- }
-
- /* Check if double-buffering is already enabled */
- reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL);
-
- if (!(reg & (1UL << channel)))
- idmac_write_ipureg(&ipu_data, reg | (1UL << channel),
- IPU_CHA_DB_MODE_SEL);
-
- /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */
- idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
- 0x0009UL, IPU_IMA_ADDR);
- idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
- }
-
- spin_unlock_irqrestore(&ipu_data.lock, flags);
-}
-
-/* Called under spin_lock_irqsave(&ichan->lock) */
-static int ipu_submit_buffer(struct idmac_channel *ichan,
- struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
-{
- unsigned int chan_id = ichan->dma_chan.chan_id;
- struct device *dev = &ichan->dma_chan.dev->device;
-
- if (async_tx_test_ack(&desc->txd))
- return -EINTR;
-
- /*
- * On first invocation this shouldn't be necessary, the call to
- * ipu_init_channel_buffer() above will set addresses for us, so we
- * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
- * doing it again shouldn't hurt either.
- */
- ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
-
- ipu_select_buffer(chan_id, buf_idx);
- dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
- sg, chan_id, buf_idx);
-
- return 0;
-}
-
-/* Called under spin_lock_irqsave(&ichan->lock) */
-static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
- struct idmac_tx_desc *desc)
-{
- struct scatterlist *sg;
- int i, ret = 0;
-
- for (i = 0, sg = desc->sg; i < 2 && sg; i++) {
- if (!ichan->sg[i]) {
- ichan->sg[i] = sg;
-
- ret = ipu_submit_buffer(ichan, desc, sg, i);
- if (ret < 0)
- return ret;
-
- sg = sg_next(sg);
- }
- }
-
- return ret;
-}
-
-static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct idmac_tx_desc *desc = to_tx_desc(tx);
- struct idmac_channel *ichan = to_idmac_chan(tx->chan);
- struct idmac *idmac = to_idmac(tx->chan->device);
- struct ipu *ipu = to_ipu(idmac);
- struct device *dev = &ichan->dma_chan.dev->device;
- dma_cookie_t cookie;
- unsigned long flags;
- int ret;
-
- /* Sanity check */
- if (!list_empty(&desc->list)) {
- /* The descriptor doesn't belong to client */
- dev_err(dev, "Descriptor %p not prepared!\n", tx);
- return -EBUSY;
- }
-
- mutex_lock(&ichan->chan_mutex);
-
- async_tx_clear_ack(tx);
-
- if (ichan->status < IPU_CHANNEL_READY) {
- struct idmac_video_param *video = &ichan->params.video;
- /*
- * Initial buffer assignment - the first two sg-entries from
- * the descriptor will end up in the IDMAC buffers
- */
- dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 :
- sg_dma_address(&desc->sg[1]);
-
- WARN_ON(ichan->sg[0] || ichan->sg[1]);
-
- cookie = ipu_init_channel_buffer(ichan,
- video->out_pixel_fmt,
- video->out_width,
- video->out_height,
- video->out_stride,
- IPU_ROTATE_NONE,
- sg_dma_address(&desc->sg[0]),
- dma_1);
- if (cookie < 0)
- goto out;
- }
-
- dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
-
- cookie = dma_cookie_assign(tx);
-
- /* ipu->lock can be taken under ichan->lock, but not v.v. */
- spin_lock_irqsave(&ichan->lock, flags);
-
- list_add_tail(&desc->list, &ichan->queue);
- /* submit_buffers() atomically verifies and fills empty sg slots */
- ret = ipu_submit_channel_buffers(ichan, desc);
-
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- if (ret < 0) {
- cookie = ret;
- goto dequeue;
- }
-
- if (ichan->status < IPU_CHANNEL_ENABLED) {
- ret = ipu_enable_channel(idmac, ichan);
- if (ret < 0) {
- cookie = ret;
- goto dequeue;
- }
- }
-
- dump_idmac_reg(ipu);
-
-dequeue:
- if (cookie < 0) {
- spin_lock_irqsave(&ichan->lock, flags);
- list_del_init(&desc->list);
- spin_unlock_irqrestore(&ichan->lock, flags);
- tx->cookie = cookie;
- ichan->dma_chan.cookie = cookie;
- }
-
-out:
- mutex_unlock(&ichan->chan_mutex);
-
- return cookie;
-}
-
-/* Called with ichan->chan_mutex held */
-static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
-{
- struct idmac_tx_desc *desc =
- vmalloc(array_size(n, sizeof(struct idmac_tx_desc)));
- struct idmac *idmac = to_idmac(ichan->dma_chan.device);
-
- if (!desc)
- return -ENOMEM;
-
- /* No interrupts, just disable the tasklet for a moment */
- tasklet_disable(&to_ipu(idmac)->tasklet);
-
- ichan->n_tx_desc = n;
- ichan->desc = desc;
- INIT_LIST_HEAD(&ichan->queue);
- INIT_LIST_HEAD(&ichan->free_list);
-
- while (n--) {
- struct dma_async_tx_descriptor *txd = &desc->txd;
-
- memset(txd, 0, sizeof(*txd));
- dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
- txd->tx_submit = idmac_tx_submit;
-
- list_add(&desc->list, &ichan->free_list);
-
- desc++;
- }
-
- tasklet_enable(&to_ipu(idmac)->tasklet);
-
- return 0;
-}
-
-/**
- * ipu_init_channel() - initialize an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: pointer to the channel object.
- * @return 0 on success or negative error code on failure.
- */
-static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
-{
- union ipu_channel_param *params = &ichan->params;
- uint32_t ipu_conf;
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- unsigned long flags;
- uint32_t reg;
- struct ipu *ipu = to_ipu(idmac);
- int ret = 0, n_desc = 0;
-
- dev_dbg(ipu->dev, "init channel = %d\n", channel);
-
- if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 &&
- channel != IDMAC_IC_7)
- return -EINVAL;
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- switch (channel) {
- case IDMAC_IC_7:
- n_desc = 16;
- reg = idmac_read_icreg(ipu, IC_CONF);
- idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF);
- break;
- case IDMAC_IC_0:
- n_desc = 16;
- reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW);
- idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW);
- ret = ipu_ic_init_prpenc(ipu, params, true);
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- n_desc = 4;
- break;
- default:
- break;
- }
-
- ipu->channel_init_mask |= 1L << channel;
-
- /* Enable IPU sub module */
- ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) |
- ipu_channel_conf_mask(channel);
- idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- if (n_desc && !ichan->desc)
- ret = idmac_desc_alloc(ichan, n_desc);
-
- dump_idmac_reg(ipu);
-
- return ret;
-}
-
-/**
- * ipu_uninit_channel() - uninitialize an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: pointer to the channel object.
- */
-static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- unsigned long flags;
- uint32_t reg;
- unsigned long chan_mask = 1UL << channel;
- uint32_t ipu_conf;
- struct ipu *ipu = to_ipu(idmac);
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- if (!(ipu->channel_init_mask & chan_mask)) {
- dev_err(ipu->dev, "Channel already uninitialized %d\n",
- channel);
- spin_unlock_irqrestore(&ipu->lock, flags);
- return;
- }
-
- /* Reset the double buffer */
- reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
- idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL);
-
- ichan->sec_chan_en = false;
-
- switch (channel) {
- case IDMAC_IC_7:
- reg = idmac_read_icreg(ipu, IC_CONF);
- idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN),
- IC_CONF);
- break;
- case IDMAC_IC_0:
- reg = idmac_read_icreg(ipu, IC_CONF);
- idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1),
- IC_CONF);
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- default:
- break;
- }
-
- ipu->channel_init_mask &= ~(1L << channel);
-
- ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) &
- ~ipu_channel_conf_mask(channel);
- idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- ichan->n_tx_desc = 0;
- vfree(ichan->desc);
- ichan->desc = NULL;
-}
-
-/**
- * ipu_disable_channel() - disable an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: channel object pointer.
- * @wait_for_stop: flag to set whether to wait for channel end of frame or
- * return immediately.
- * @return: 0 on success or negative error code on failure.
- */
-static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
- bool wait_for_stop)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- struct ipu *ipu = to_ipu(idmac);
- uint32_t reg;
- unsigned long flags;
- unsigned long chan_mask = 1UL << channel;
- unsigned int timeout;
-
- if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
- timeout = 40;
- /* This waiting always fails. Related to spurious irq problem */
- while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
- (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
- timeout--;
- msleep(10);
-
- if (!timeout) {
- dev_dbg(ipu->dev,
- "Warning: timeout waiting for channel %u to "
- "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
- "busy = 0x%08X, tstat = 0x%08X\n", channel,
- idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
- idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
- idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
- idmac_read_ipureg(ipu, IPU_TASKS_STAT));
- break;
- }
- }
- dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
- }
- /* SDC BG and FG must be disabled before DMA is disabled */
- if (wait_for_stop && (channel == IDMAC_SDC_0 ||
- channel == IDMAC_SDC_1)) {
- for (timeout = 5;
- timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
- msleep(5);
- }
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- /* Disable IC task */
- ipu_ic_disable_task(ipu, channel);
-
- /* Disable DMA channel(s) */
- reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
- idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- return 0;
-}
-
-static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
- struct idmac_tx_desc **desc, struct scatterlist *sg)
-{
- struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
-
- if (sgnew)
- /* next sg-element in this list */
- return sgnew;
-
- if ((*desc)->list.next == &ichan->queue)
- /* No more descriptors on the queue */
- return NULL;
-
- /* Fetch next descriptor */
- *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
- return (*desc)->sg;
-}
-
-/*
- * We have several possibilities here:
- * current BUF next BUF
- *
- * not last sg next not last sg
- * not last sg next last sg
- * last sg first sg from next descriptor
- * last sg NULL
- *
- * Besides, the descriptor queue might be empty or not. We process all these
- * cases carefully.
- */
-static irqreturn_t idmac_interrupt(int irq, void *dev_id)
-{
- struct idmac_channel *ichan = dev_id;
- struct device *dev = &ichan->dma_chan.dev->device;
- unsigned int chan_id = ichan->dma_chan.chan_id;
- struct scatterlist **sg, *sgnext, *sgnew = NULL;
- /* Next transfer descriptor */
- struct idmac_tx_desc *desc, *descnew;
- bool done = false;
- u32 ready0, ready1, curbuf, err;
- struct dmaengine_desc_callback cb;
-
- /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
-
- dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
-
- spin_lock(&ipu_data.lock);
-
- ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
- ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
- curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
- err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
-
- if (err & (1 << chan_id)) {
- idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
- spin_unlock(&ipu_data.lock);
- /*
- * Doing this
- * ichan->sg[0] = ichan->sg[1] = NULL;
- * you can force channel re-enable on the next tx_submit(), but
- * this is dirty - think about descriptors with multiple
- * sg elements.
- */
- dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
- chan_id, ready0, ready1, curbuf);
- return IRQ_HANDLED;
- }
- spin_unlock(&ipu_data.lock);
-
- /* Other interrupts do not interfere with this channel */
- spin_lock(&ichan->lock);
- if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
- (!ichan->active_buffer && (ready0 >> chan_id) & 1)
- )) {
- spin_unlock(&ichan->lock);
- dev_dbg(dev,
- "IRQ with active buffer still ready on channel %x, "
- "active %d, ready %x, %x!\n", chan_id,
- ichan->active_buffer, ready0, ready1);
- return IRQ_NONE;
- }
-
- if (unlikely(list_empty(&ichan->queue))) {
- ichan->sg[ichan->active_buffer] = NULL;
- spin_unlock(&ichan->lock);
- dev_err(dev,
- "IRQ without queued buffers on channel %x, active %d, "
- "ready %x, %x!\n", chan_id,
- ichan->active_buffer, ready0, ready1);
- return IRQ_NONE;
- }
-
- /*
- * active_buffer is a software flag, it shows which buffer we are
- * currently expecting back from the hardware, IDMAC should be
- * processing the other buffer already
- */
- sg = &ichan->sg[ichan->active_buffer];
- sgnext = ichan->sg[!ichan->active_buffer];
-
- if (!*sg) {
- spin_unlock(&ichan->lock);
- return IRQ_HANDLED;
- }
-
- desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
- descnew = desc;
-
- dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
- irq, (u64)sg_dma_address(*sg),
- sgnext ? (u64)sg_dma_address(sgnext) : 0,
- ichan->active_buffer, curbuf);
-
- /* Find the descriptor of sgnext */
- sgnew = idmac_sg_next(ichan, &descnew, *sg);
- if (sgnext != sgnew)
- dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
-
- /*
- * if sgnext == NULL sg must be the last element in a scatterlist and
- * queue must be empty
- */
- if (unlikely(!sgnext)) {
- if (!WARN_ON(sg_next(*sg)))
- dev_dbg(dev, "Underrun on channel %x\n", chan_id);
- ichan->sg[!ichan->active_buffer] = sgnew;
-
- if (unlikely(sgnew)) {
- ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
- } else {
- spin_lock(&ipu_data.lock);
- ipu_ic_disable_task(&ipu_data, chan_id);
- spin_unlock(&ipu_data.lock);
- ichan->status = IPU_CHANNEL_READY;
- /* Continue to check for complete descriptor */
- }
- }
-
- /* Calculate and submit the next sg element */
- sgnew = idmac_sg_next(ichan, &descnew, sgnew);
-
- if (unlikely(!sg_next(*sg)) || !sgnext) {
- /*
- * Last element in scatterlist done, remove from the queue,
- * _init for debugging
- */
- list_del_init(&desc->list);
- done = true;
- }
-
- *sg = sgnew;
-
- if (likely(sgnew) &&
- ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
- dmaengine_desc_get_callback(&descnew->txd, &cb);
-
- list_del_init(&descnew->list);
- spin_unlock(&ichan->lock);
-
- dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock(&ichan->lock);
- }
-
- /* Flip the active buffer - even if update above failed */
- ichan->active_buffer = !ichan->active_buffer;
- if (done)
- dma_cookie_complete(&desc->txd);
-
- dmaengine_desc_get_callback(&desc->txd, &cb);
-
- spin_unlock(&ichan->lock);
-
- if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
- dmaengine_desc_callback_invoke(&cb, NULL);
-
- return IRQ_HANDLED;
-}
-
-static void ipu_gc_tasklet(struct tasklet_struct *t)
-{
- struct ipu *ipu = from_tasklet(ipu, t, tasklet);
- int i;
-
- for (i = 0; i < IPU_CHANNELS_NUM; i++) {
- struct idmac_channel *ichan = ipu->channel + i;
- struct idmac_tx_desc *desc;
- unsigned long flags;
- struct scatterlist *sg;
- int j, k;
-
- for (j = 0; j < ichan->n_tx_desc; j++) {
- desc = ichan->desc + j;
- spin_lock_irqsave(&ichan->lock, flags);
- if (async_tx_test_ack(&desc->txd)) {
- list_move(&desc->list, &ichan->free_list);
- for_each_sg(desc->sg, sg, desc->sg_len, k) {
- if (ichan->sg[0] == sg)
- ichan->sg[0] = NULL;
- else if (ichan->sg[1] == sg)
- ichan->sg[1] = NULL;
- }
- async_tx_clear_ack(&desc->txd);
- }
- spin_unlock_irqrestore(&ichan->lock, flags);
- }
- }
-}
-
-/* Allocate and initialise a transfer descriptor. */
-static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
- struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long tx_flags,
- void *context)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac_tx_desc *desc = NULL;
- struct dma_async_tx_descriptor *txd = NULL;
- unsigned long flags;
-
- /* We only can handle these three channels so far */
- if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
- chan->chan_id != IDMAC_IC_7)
- return NULL;
-
- if (!is_slave_direction(direction)) {
- dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
- return NULL;
- }
-
- mutex_lock(&ichan->chan_mutex);
-
- spin_lock_irqsave(&ichan->lock, flags);
- if (!list_empty(&ichan->free_list)) {
- desc = list_entry(ichan->free_list.next,
- struct idmac_tx_desc, list);
-
- list_del_init(&desc->list);
-
- desc->sg_len = sg_len;
- desc->sg = sgl;
- txd = &desc->txd;
- txd->flags = tx_flags;
- }
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- mutex_unlock(&ichan->chan_mutex);
-
- tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet);
-
- return txd;
-}
-
-/* Re-select the current buffer and re-activate the channel */
-static void idmac_issue_pending(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- struct ipu *ipu = to_ipu(idmac);
- unsigned long flags;
-
- /* This is not always needed, but doesn't hurt either */
- spin_lock_irqsave(&ipu->lock, flags);
- ipu_select_buffer(chan->chan_id, ichan->active_buffer);
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- /*
- * Might need to perform some parts of initialisation from
- * ipu_enable_channel(), but not all, we do not want to reset to buffer
- * 0, don't need to set priority again either, but re-enabling the task
- * and the channel might be a good idea.
- */
-}
-
-static int idmac_pause(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- struct ipu *ipu = to_ipu(idmac);
- struct list_head *list, *tmp;
- unsigned long flags;
-
- mutex_lock(&ichan->chan_mutex);
-
- spin_lock_irqsave(&ipu->lock, flags);
- ipu_ic_disable_task(ipu, chan->chan_id);
-
- /* Return all descriptors into "prepared" state */
- list_for_each_safe(list, tmp, &ichan->queue)
- list_del_init(list);
-
- ichan->sg[0] = NULL;
- ichan->sg[1] = NULL;
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- ichan->status = IPU_CHANNEL_INITIALIZED;
-
- mutex_unlock(&ichan->chan_mutex);
-
- return 0;
-}
-
-static int __idmac_terminate_all(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- struct ipu *ipu = to_ipu(idmac);
- unsigned long flags;
- int i;
-
- ipu_disable_channel(idmac, ichan,
- ichan->status >= IPU_CHANNEL_ENABLED);
-
- tasklet_disable(&ipu->tasklet);
-
- /* ichan->queue is modified in ISR, have to spinlock */
- spin_lock_irqsave(&ichan->lock, flags);
- list_splice_init(&ichan->queue, &ichan->free_list);
-
- if (ichan->desc)
- for (i = 0; i < ichan->n_tx_desc; i++) {
- struct idmac_tx_desc *desc = ichan->desc + i;
- if (list_empty(&desc->list))
- /* Descriptor was prepared, but not submitted */
- list_add(&desc->list, &ichan->free_list);
-
- async_tx_clear_ack(&desc->txd);
- }
-
- ichan->sg[0] = NULL;
- ichan->sg[1] = NULL;
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- tasklet_enable(&ipu->tasklet);
-
- ichan->status = IPU_CHANNEL_INITIALIZED;
-
- return 0;
-}
-
-static int idmac_terminate_all(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- int ret;
-
- mutex_lock(&ichan->chan_mutex);
-
- ret = __idmac_terminate_all(chan);
-
- mutex_unlock(&ichan->chan_mutex);
-
- return ret;
-}
-
-#ifdef DEBUG
-static irqreturn_t ic_sof_irq(int irq, void *dev_id)
-{
- struct idmac_channel *ichan = dev_id;
- printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
- irq, ichan->dma_chan.chan_id);
- disable_irq_nosync(irq);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ic_eof_irq(int irq, void *dev_id)
-{
- struct idmac_channel *ichan = dev_id;
- printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
- irq, ichan->dma_chan.chan_id);
- disable_irq_nosync(irq);
- return IRQ_HANDLED;
-}
-
-static int ic_sof = -EINVAL, ic_eof = -EINVAL;
-#endif
-
-static int idmac_alloc_chan_resources(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- int ret;
-
- /* dmaengine.c now guarantees to only offer free channels */
- BUG_ON(chan->client_count > 1);
- WARN_ON(ichan->status != IPU_CHANNEL_FREE);
-
- dma_cookie_init(chan);
-
- ret = ipu_irq_map(chan->chan_id);
- if (ret < 0)
- goto eimap;
-
- ichan->eof_irq = ret;
-
- /*
- * Important to first disable the channel, because maybe someone
- * used it before us, e.g., the bootloader
- */
- ipu_disable_channel(idmac, ichan, true);
-
- ret = ipu_init_channel(idmac, ichan);
- if (ret < 0)
- goto eichan;
-
- ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
- ichan->eof_name, ichan);
- if (ret < 0)
- goto erirq;
-
-#ifdef DEBUG
- if (chan->chan_id == IDMAC_IC_7) {
- ic_sof = ipu_irq_map(69);
- if (ic_sof > 0) {
- ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
- if (ret)
- dev_err(&chan->dev->device, "request irq failed for IC SOF");
- }
- ic_eof = ipu_irq_map(70);
- if (ic_eof > 0) {
- ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
- if (ret)
- dev_err(&chan->dev->device, "request irq failed for IC EOF");
- }
- }
-#endif
-
- ichan->status = IPU_CHANNEL_INITIALIZED;
-
- dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
- chan->chan_id, ichan->eof_irq);
-
- return ret;
-
-erirq:
- ipu_uninit_channel(idmac, ichan);
-eichan:
- ipu_irq_unmap(chan->chan_id);
-eimap:
- return ret;
-}
-
-static void idmac_free_chan_resources(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
-
- mutex_lock(&ichan->chan_mutex);
-
- __idmac_terminate_all(chan);
-
- if (ichan->status > IPU_CHANNEL_FREE) {
-#ifdef DEBUG
- if (chan->chan_id == IDMAC_IC_7) {
- if (ic_sof > 0) {
- free_irq(ic_sof, ichan);
- ipu_irq_unmap(69);
- ic_sof = -EINVAL;
- }
- if (ic_eof > 0) {
- free_irq(ic_eof, ichan);
- ipu_irq_unmap(70);
- ic_eof = -EINVAL;
- }
- }
-#endif
- free_irq(ichan->eof_irq, ichan);
- ipu_irq_unmap(chan->chan_id);
- }
-
- ichan->status = IPU_CHANNEL_FREE;
-
- ipu_uninit_channel(idmac, ichan);
-
- mutex_unlock(&ichan->chan_mutex);
-
- tasklet_schedule(&to_ipu(idmac)->tasklet);
-}
-
-static enum dma_status idmac_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
- return dma_cookie_status(chan, cookie, txstate);
-}
-
-static int __init ipu_idmac_init(struct ipu *ipu)
-{
- struct idmac *idmac = &ipu->idmac;
- struct dma_device *dma = &idmac->dma;
- int i;
-
- dma_cap_set(DMA_SLAVE, dma->cap_mask);
- dma_cap_set(DMA_PRIVATE, dma->cap_mask);
-
- /* Compulsory common fields */
- dma->dev = ipu->dev;
- dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
- dma->device_free_chan_resources = idmac_free_chan_resources;
- dma->device_tx_status = idmac_tx_status;
- dma->device_issue_pending = idmac_issue_pending;
-
- /* Compulsory for DMA_SLAVE fields */
- dma->device_prep_slave_sg = idmac_prep_slave_sg;
- dma->device_pause = idmac_pause;
- dma->device_terminate_all = idmac_terminate_all;
-
- INIT_LIST_HEAD(&dma->channels);
- for (i = 0; i < IPU_CHANNELS_NUM; i++) {
- struct idmac_channel *ichan = ipu->channel + i;
- struct dma_chan *dma_chan = &ichan->dma_chan;
-
- spin_lock_init(&ichan->lock);
- mutex_init(&ichan->chan_mutex);
-
- ichan->status = IPU_CHANNEL_FREE;
- ichan->sec_chan_en = false;
- snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
-
- dma_chan->device = &idmac->dma;
- dma_cookie_init(dma_chan);
- dma_chan->chan_id = i;
- list_add_tail(&dma_chan->device_node, &dma->channels);
- }
-
- idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
-
- return dma_async_device_register(&idmac->dma);
-}
-
-static void ipu_idmac_exit(struct ipu *ipu)
-{
- int i;
- struct idmac *idmac = &ipu->idmac;
-
- for (i = 0; i < IPU_CHANNELS_NUM; i++) {
- struct idmac_channel *ichan = ipu->channel + i;
-
- idmac_terminate_all(&ichan->dma_chan);
- }
-
- dma_async_device_unregister(&idmac->dma);
-}
-
-/*****************************************************************************
- * IPU common probe / remove
- */
-
-static int __init ipu_probe(struct platform_device *pdev)
-{
- struct resource *mem_ipu, *mem_ic;
- int ret;
-
- spin_lock_init(&ipu_data.lock);
-
- mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!mem_ipu || !mem_ic)
- return -EINVAL;
-
- ipu_data.dev = &pdev->dev;
-
- platform_set_drvdata(pdev, &ipu_data);
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- goto err_noirq;
-
- ipu_data.irq_fn = ret;
- ret = platform_get_irq(pdev, 1);
- if (ret < 0)
- goto err_noirq;
-
- ipu_data.irq_err = ret;
-
- dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n",
- ipu_data.irq_fn, ipu_data.irq_err);
-
- /* Remap IPU common registers */
- ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
- if (!ipu_data.reg_ipu) {
- ret = -ENOMEM;
- goto err_ioremap_ipu;
- }
-
- /* Remap Image Converter and Image DMA Controller registers */
- ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
- if (!ipu_data.reg_ic) {
- ret = -ENOMEM;
- goto err_ioremap_ic;
- }
-
- /* Get IPU clock */
- ipu_data.ipu_clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(ipu_data.ipu_clk)) {
- ret = PTR_ERR(ipu_data.ipu_clk);
- goto err_clk_get;
- }
-
- /* Make sure IPU HSP clock is running */
- clk_prepare_enable(ipu_data.ipu_clk);
-
- /* Disable all interrupts */
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5);
-
- dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name,
- (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err);
-
- ret = ipu_irq_attach_irq(&ipu_data, pdev);
- if (ret < 0)
- goto err_attach_irq;
-
- /* Initialize DMA engine */
- ret = ipu_idmac_init(&ipu_data);
- if (ret < 0)
- goto err_idmac_init;
-
- tasklet_setup(&ipu_data.tasklet, ipu_gc_tasklet);
-
- ipu_data.dev = &pdev->dev;
-
- dev_dbg(ipu_data.dev, "IPU initialized\n");
-
- return 0;
-
-err_idmac_init:
-err_attach_irq:
- ipu_irq_detach_irq(&ipu_data, pdev);
- clk_disable_unprepare(ipu_data.ipu_clk);
- clk_put(ipu_data.ipu_clk);
-err_clk_get:
- iounmap(ipu_data.reg_ic);
-err_ioremap_ic:
- iounmap(ipu_data.reg_ipu);
-err_ioremap_ipu:
-err_noirq:
- dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret);
- return ret;
-}
-
-static int ipu_remove(struct platform_device *pdev)
-{
- struct ipu *ipu = platform_get_drvdata(pdev);
-
- ipu_idmac_exit(ipu);
- ipu_irq_detach_irq(ipu, pdev);
- clk_disable_unprepare(ipu->ipu_clk);
- clk_put(ipu->ipu_clk);
- iounmap(ipu->reg_ic);
- iounmap(ipu->reg_ipu);
- tasklet_kill(&ipu->tasklet);
-
- return 0;
-}
-
-/*
- * We need two MEM resources - with IPU-common and Image Converter registers,
- * including PF_CONF and IDMAC_* registers, and two IRQs - function and error
- */
-static struct platform_driver ipu_platform_driver = {
- .driver = {
- .name = "ipu-core",
- },
- .remove = ipu_remove,
-};
-
-static int __init ipu_init(void)
-{
- return platform_driver_probe(&ipu_platform_driver, ipu_probe);
-}
-subsys_initcall(ipu_init);
-
-MODULE_DESCRIPTION("IPU core driver");
-MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
-MODULE_ALIAS("platform:ipu-core");
diff --git a/drivers/dma/ipu/ipu_intern.h b/drivers/dma/ipu/ipu_intern.h
deleted file mode 100644
index e7ec1dec3edf..000000000000
--- a/drivers/dma/ipu/ipu_intern.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-#ifndef _IPU_INTERN_H_
-#define _IPU_INTERN_H_
-
-#include <linux/dmaengine.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-
-/* IPU Common registers */
-#define IPU_CONF 0x00
-#define IPU_CHA_BUF0_RDY 0x04
-#define IPU_CHA_BUF1_RDY 0x08
-#define IPU_CHA_DB_MODE_SEL 0x0C
-#define IPU_CHA_CUR_BUF 0x10
-#define IPU_FS_PROC_FLOW 0x14
-#define IPU_FS_DISP_FLOW 0x18
-#define IPU_TASKS_STAT 0x1C
-#define IPU_IMA_ADDR 0x20
-#define IPU_IMA_DATA 0x24
-#define IPU_INT_CTRL_1 0x28
-#define IPU_INT_CTRL_2 0x2C
-#define IPU_INT_CTRL_3 0x30
-#define IPU_INT_CTRL_4 0x34
-#define IPU_INT_CTRL_5 0x38
-#define IPU_INT_STAT_1 0x3C
-#define IPU_INT_STAT_2 0x40
-#define IPU_INT_STAT_3 0x44
-#define IPU_INT_STAT_4 0x48
-#define IPU_INT_STAT_5 0x4C
-#define IPU_BRK_CTRL_1 0x50
-#define IPU_BRK_CTRL_2 0x54
-#define IPU_BRK_STAT 0x58
-#define IPU_DIAGB_CTRL 0x5C
-
-/* IPU_CONF Register bits */
-#define IPU_CONF_CSI_EN 0x00000001
-#define IPU_CONF_IC_EN 0x00000002
-#define IPU_CONF_ROT_EN 0x00000004
-#define IPU_CONF_PF_EN 0x00000008
-#define IPU_CONF_SDC_EN 0x00000010
-#define IPU_CONF_ADC_EN 0x00000020
-#define IPU_CONF_DI_EN 0x00000040
-#define IPU_CONF_DU_EN 0x00000080
-#define IPU_CONF_PXL_ENDIAN 0x00000100
-
-/* Image Converter Registers */
-#define IC_CONF 0x88
-#define IC_PRP_ENC_RSC 0x8C
-#define IC_PRP_VF_RSC 0x90
-#define IC_PP_RSC 0x94
-#define IC_CMBP_1 0x98
-#define IC_CMBP_2 0x9C
-#define PF_CONF 0xA0
-#define IDMAC_CONF 0xA4
-#define IDMAC_CHA_EN 0xA8
-#define IDMAC_CHA_PRI 0xAC
-#define IDMAC_CHA_BUSY 0xB0
-
-/* Image Converter Register bits */
-#define IC_CONF_PRPENC_EN 0x00000001
-#define IC_CONF_PRPENC_CSC1 0x00000002
-#define IC_CONF_PRPENC_ROT_EN 0x00000004
-#define IC_CONF_PRPVF_EN 0x00000100
-#define IC_CONF_PRPVF_CSC1 0x00000200
-#define IC_CONF_PRPVF_CSC2 0x00000400
-#define IC_CONF_PRPVF_CMB 0x00000800
-#define IC_CONF_PRPVF_ROT_EN 0x00001000
-#define IC_CONF_PP_EN 0x00010000
-#define IC_CONF_PP_CSC1 0x00020000
-#define IC_CONF_PP_CSC2 0x00040000
-#define IC_CONF_PP_CMB 0x00080000
-#define IC_CONF_PP_ROT_EN 0x00100000
-#define IC_CONF_IC_GLB_LOC_A 0x10000000
-#define IC_CONF_KEY_COLOR_EN 0x20000000
-#define IC_CONF_RWS_EN 0x40000000
-#define IC_CONF_CSI_MEM_WR_EN 0x80000000
-
-#define IDMA_CHAN_INVALID 0x000000FF
-#define IDMA_IC_0 0x00000001
-#define IDMA_IC_1 0x00000002
-#define IDMA_IC_2 0x00000004
-#define IDMA_IC_3 0x00000008
-#define IDMA_IC_4 0x00000010
-#define IDMA_IC_5 0x00000020
-#define IDMA_IC_6 0x00000040
-#define IDMA_IC_7 0x00000080
-#define IDMA_IC_8 0x00000100
-#define IDMA_IC_9 0x00000200
-#define IDMA_IC_10 0x00000400
-#define IDMA_IC_11 0x00000800
-#define IDMA_IC_12 0x00001000
-#define IDMA_IC_13 0x00002000
-#define IDMA_SDC_BG 0x00004000
-#define IDMA_SDC_FG 0x00008000
-#define IDMA_SDC_MASK 0x00010000
-#define IDMA_SDC_PARTIAL 0x00020000
-#define IDMA_ADC_SYS1_WR 0x00040000
-#define IDMA_ADC_SYS2_WR 0x00080000
-#define IDMA_ADC_SYS1_CMD 0x00100000
-#define IDMA_ADC_SYS2_CMD 0x00200000
-#define IDMA_ADC_SYS1_RD 0x00400000
-#define IDMA_ADC_SYS2_RD 0x00800000
-#define IDMA_PF_QP 0x01000000
-#define IDMA_PF_BSP 0x02000000
-#define IDMA_PF_Y_IN 0x04000000
-#define IDMA_PF_U_IN 0x08000000
-#define IDMA_PF_V_IN 0x10000000
-#define IDMA_PF_Y_OUT 0x20000000
-#define IDMA_PF_U_OUT 0x40000000
-#define IDMA_PF_V_OUT 0x80000000
-
-#define TSTAT_PF_H264_PAUSE 0x00000001
-#define TSTAT_CSI2MEM_MASK 0x0000000C
-#define TSTAT_CSI2MEM_OFFSET 2
-#define TSTAT_VF_MASK 0x00000600
-#define TSTAT_VF_OFFSET 9
-#define TSTAT_VF_ROT_MASK 0x000C0000
-#define TSTAT_VF_ROT_OFFSET 18
-#define TSTAT_ENC_MASK 0x00000180
-#define TSTAT_ENC_OFFSET 7
-#define TSTAT_ENC_ROT_MASK 0x00030000
-#define TSTAT_ENC_ROT_OFFSET 16
-#define TSTAT_PP_MASK 0x00001800
-#define TSTAT_PP_OFFSET 11
-#define TSTAT_PP_ROT_MASK 0x00300000
-#define TSTAT_PP_ROT_OFFSET 20
-#define TSTAT_PF_MASK 0x00C00000
-#define TSTAT_PF_OFFSET 22
-#define TSTAT_ADCSYS1_MASK 0x03000000
-#define TSTAT_ADCSYS1_OFFSET 24
-#define TSTAT_ADCSYS2_MASK 0x0C000000
-#define TSTAT_ADCSYS2_OFFSET 26
-
-#define TASK_STAT_IDLE 0
-#define TASK_STAT_ACTIVE 1
-#define TASK_STAT_WAIT4READY 2
-
-struct idmac {
- struct dma_device dma;
-};
-
-struct ipu {
- void __iomem *reg_ipu;
- void __iomem *reg_ic;
- unsigned int irq_fn; /* IPU Function IRQ to the CPU */
- unsigned int irq_err; /* IPU Error IRQ to the CPU */
- unsigned int irq_base; /* Beginning of the IPU IRQ range */
- unsigned long channel_init_mask;
- spinlock_t lock;
- struct clk *ipu_clk;
- struct device *dev;
- struct idmac idmac;
- struct idmac_channel channel[IPU_CHANNELS_NUM];
- struct tasklet_struct tasklet;
-};
-
-#define to_idmac(d) container_of(d, struct idmac, dma)
-
-extern int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev);
-extern void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev);
-
-extern bool ipu_irq_status(uint32_t irq);
-extern int ipu_irq_map(unsigned int source);
-extern int ipu_irq_unmap(unsigned int source);
-
-#endif
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
deleted file mode 100644
index 97d9a6f04f2a..000000000000
--- a/drivers/dma/ipu/ipu_irq.c
+++ /dev/null
@@ -1,367 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/dma/ipu-dma.h>
-
-#include "ipu_intern.h"
-
-/*
- * Register read / write - shall be inlined by the compiler
- */
-static u32 ipu_read_reg(struct ipu *ipu, unsigned long reg)
-{
- return __raw_readl(ipu->reg_ipu + reg);
-}
-
-static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
-{
- __raw_writel(value, ipu->reg_ipu + reg);
-}
-
-
-/*
- * IPU IRQ chip driver
- */
-
-#define IPU_IRQ_NR_FN_BANKS 3
-#define IPU_IRQ_NR_ERR_BANKS 2
-#define IPU_IRQ_NR_BANKS (IPU_IRQ_NR_FN_BANKS + IPU_IRQ_NR_ERR_BANKS)
-
-struct ipu_irq_bank {
- unsigned int control;
- unsigned int status;
- struct ipu *ipu;
-};
-
-static struct ipu_irq_bank irq_bank[IPU_IRQ_NR_BANKS] = {
- /* 3 groups of functional interrupts */
- {
- .control = IPU_INT_CTRL_1,
- .status = IPU_INT_STAT_1,
- }, {
- .control = IPU_INT_CTRL_2,
- .status = IPU_INT_STAT_2,
- }, {
- .control = IPU_INT_CTRL_3,
- .status = IPU_INT_STAT_3,
- },
- /* 2 groups of error interrupts */
- {
- .control = IPU_INT_CTRL_4,
- .status = IPU_INT_STAT_4,
- }, {
- .control = IPU_INT_CTRL_5,
- .status = IPU_INT_STAT_5,
- },
-};
-
-struct ipu_irq_map {
- unsigned int irq;
- int source;
- struct ipu_irq_bank *bank;
- struct ipu *ipu;
-};
-
-static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
-/* Protects allocations from the above array of maps */
-static DEFINE_MUTEX(map_lock);
-/* Protects register accesses and individual mappings */
-static DEFINE_RAW_SPINLOCK(bank_lock);
-
-static struct ipu_irq_map *src2map(unsigned int src)
-{
- int i;
-
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++)
- if (irq_map[i].source == src)
- return irq_map + i;
-
- return NULL;
-}
-
-static void ipu_irq_unmask(struct irq_data *d)
-{
- struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
- struct ipu_irq_bank *bank;
- uint32_t reg;
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
-
- bank = map->bank;
- if (!bank) {
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-
- reg = ipu_read_reg(bank->ipu, bank->control);
- reg |= (1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
-
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-}
-
-static void ipu_irq_mask(struct irq_data *d)
-{
- struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
- struct ipu_irq_bank *bank;
- uint32_t reg;
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
-
- bank = map->bank;
- if (!bank) {
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-
- reg = ipu_read_reg(bank->ipu, bank->control);
- reg &= ~(1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
-
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-}
-
-static void ipu_irq_ack(struct irq_data *d)
-{
- struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
- struct ipu_irq_bank *bank;
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
-
- bank = map->bank;
- if (!bank) {
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-
- ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-}
-
-/**
- * ipu_irq_status() - returns the current interrupt status of the specified IRQ.
- * @irq: interrupt line to get status for.
- * @return: true if the interrupt is pending/asserted or false if the
- * interrupt is not pending.
- */
-bool ipu_irq_status(unsigned int irq)
-{
- struct ipu_irq_map *map = irq_get_chip_data(irq);
- struct ipu_irq_bank *bank;
- unsigned long lock_flags;
- bool ret;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
- bank = map->bank;
- ret = bank && ipu_read_reg(bank->ipu, bank->status) &
- (1UL << (map->source & 31));
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-
- return ret;
-}
-
-/**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
- * @source: interrupt source bit position (see below)
- * @return: mapped IRQ number or negative error code
- *
- * The source parameter has to be explained further. On i.MX31 IPU has 137 IRQ
- * sources, they are broken down in 5 32-bit registers, like 32, 32, 24, 32, 17.
- * However, the source argument of this function is not the sequence number of
- * the possible IRQ, but rather its bit position. So, first interrupt in fourth
- * register has source number 96, and not 88. This makes calculations easier,
- * and also provides forward compatibility with any future IPU implementations
- * with any interrupt bit assignments.
- */
-int ipu_irq_map(unsigned int source)
-{
- int i, ret = -ENOMEM;
- struct ipu_irq_map *map;
-
- might_sleep();
-
- mutex_lock(&map_lock);
- map = src2map(source);
- if (map) {
- pr_err("IPU: Source %u already mapped to IRQ %u\n", source, map->irq);
- ret = -EBUSY;
- goto out;
- }
-
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
- if (irq_map[i].source < 0) {
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = source;
- irq_map[i].bank = irq_bank + source / 32;
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-
- ret = irq_map[i].irq;
- pr_debug("IPU: mapped source %u to IRQ %u\n",
- source, ret);
- break;
- }
- }
-out:
- mutex_unlock(&map_lock);
-
- if (ret < 0)
- pr_err("IPU: couldn't map source %u: %d\n", source, ret);
-
- return ret;
-}
-
-/**
- * ipu_irq_unmap() - unmap an IPU interrupt source
- * @source: interrupt source bit position (see ipu_irq_map())
- * @return: 0 or negative error code
- */
-int ipu_irq_unmap(unsigned int source)
-{
- int i, ret = -EINVAL;
-
- might_sleep();
-
- mutex_lock(&map_lock);
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
- if (irq_map[i].source == source) {
- unsigned long lock_flags;
-
- pr_debug("IPU: unmapped source %u from IRQ %u\n",
- source, irq_map[i].irq);
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = -EINVAL;
- irq_map[i].bank = NULL;
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-
- ret = 0;
- break;
- }
- }
- mutex_unlock(&map_lock);
-
- return ret;
-}
-
-/* Chained IRQ handler for IPU function and error interrupt */
-static void ipu_irq_handler(struct irq_desc *desc)
-{
- struct ipu *ipu = irq_desc_get_handler_data(desc);
- u32 status;
- int i, line;
-
- for (i = 0; i < IPU_IRQ_NR_BANKS; i++) {
- struct ipu_irq_bank *bank = irq_bank + i;
-
- raw_spin_lock(&bank_lock);
- status = ipu_read_reg(ipu, bank->status);
- /*
- * Don't think we have to clear all interrupts here, they will
- * be acked by ->handle_irq() (handle_level_irq). However, we
- * might want to clear unhandled interrupts after the loop...
- */
- status &= ipu_read_reg(ipu, bank->control);
- raw_spin_unlock(&bank_lock);
- while ((line = ffs(status))) {
- struct ipu_irq_map *map;
- unsigned int irq;
-
- line--;
- status &= ~(1UL << line);
-
- raw_spin_lock(&bank_lock);
- map = src2map(32 * i + line);
- if (!map) {
- raw_spin_unlock(&bank_lock);
- pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
- line, i);
- continue;
- }
- irq = map->irq;
- raw_spin_unlock(&bank_lock);
- generic_handle_irq(irq);
- }
- }
-}
-
-static struct irq_chip ipu_irq_chip = {
- .name = "ipu_irq",
- .irq_ack = ipu_irq_ack,
- .irq_mask = ipu_irq_mask,
- .irq_unmask = ipu_irq_unmask,
-};
-
-/* Install the IRQ handler */
-int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
-{
- unsigned int irq, i;
- int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS,
- numa_node_id());
-
- if (irq_base < 0)
- return irq_base;
-
- for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
- irq_bank[i].ipu = ipu;
-
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
- int ret;
-
- irq = irq_base + i;
- ret = irq_set_chip(irq, &ipu_irq_chip);
- if (ret < 0)
- return ret;
- ret = irq_set_chip_data(irq, irq_map + i);
- if (ret < 0)
- return ret;
- irq_map[i].ipu = ipu;
- irq_map[i].irq = irq;
- irq_map[i].source = -EINVAL;
- irq_set_handler(irq, handle_level_irq);
- irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
-
- irq_set_chained_handler_and_data(ipu->irq_fn, ipu_irq_handler, ipu);
-
- irq_set_chained_handler_and_data(ipu->irq_err, ipu_irq_handler, ipu);
-
- ipu->irq_base = irq_base;
-
- return 0;
-}
-
-void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
-{
- unsigned int irq, irq_base;
-
- irq_base = ipu->irq_base;
-
- irq_set_chained_handler_and_data(ipu->irq_fn, NULL, NULL);
-
- irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
-
- for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
- irq_set_status_flags(irq, IRQ_NOREQUEST);
- irq_set_chip(irq, NULL);
- irq_set_chip_data(irq, NULL);
- }
-}
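The ipu_irq_map() comment in the file removed above numbers IRQ sources by bit position rather than by sequence; a minimal, illustrative sketch of the bank/bit split the removed code relied on (helper names are hypothetical, not part of the driver):

/* Illustrative only: "source / 32" selects the irq_bank[] entry and
 * "source & 31" the bit within its control/status register, exactly as
 * the removed ipu_irq_map()/ipu_irq_unmask() code did. Source 96 (the
 * first bit of the fourth register) is therefore bank 3, bit 0.
 */
static inline unsigned int ipu_src_bank(unsigned int source)
{
	return source / 32;
}

static inline unsigned int ipu_src_bit(unsigned int source)
{
	return source & 31;
}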
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 1709d159af7e..4117c7b67e9c 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -1732,9 +1732,4 @@ static struct platform_driver intel_ldma_driver = {
* registered DMA channels and DMA capabilities to clients before their
* initialization.
*/
-static int __init intel_ldma_init(void)
-{
- return platform_driver_register(&intel_ldma_driver);
-}
-
-device_initcall(intel_ldma_init);
+builtin_platform_driver(intel_ldma_driver);
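The lgm-dma hunk above folds the open-coded initcall into builtin_platform_driver(); roughly, the macro generates the same code that was removed. A sketch of the equivalence (not the literal macro expansion):

/* What builtin_platform_driver(intel_ldma_driver) boils down to: an
 * __init function that registers the driver, wired up as a
 * device_initcall, so registration timing is unchanged.
 */
static int __init intel_ldma_driver_init(void)
{
	return platform_driver_register(&intel_ldma_driver);
}
device_initcall(intel_ldma_driver_init);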
diff --git a/drivers/dma/lpc18xx-dmamux.c b/drivers/dma/lpc18xx-dmamux.c
index df98cae8792b..2b6436f4b193 100644
--- a/drivers/dma/lpc18xx-dmamux.c
+++ b/drivers/dma/lpc18xx-dmamux.c
@@ -12,8 +12,10 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma-main.c
index 9413fad08a60..b359421ee9ea 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -19,7 +19,6 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
struct fsl_edma_engine *mcf_edma = dev_id;
struct edma_regs *regs = &mcf_edma->regs;
unsigned int ch;
- struct fsl_edma_chan *mcf_chan;
u64 intmap;
intmap = ioread32(regs->inth);
@@ -31,31 +30,7 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
for (ch = 0; ch < mcf_edma->n_chans; ch++) {
if (intmap & BIT(ch)) {
iowrite8(EDMA_MASK_CH(ch), regs->cint);
-
- mcf_chan = &mcf_edma->chans[ch];
-
- spin_lock(&mcf_chan->vchan.lock);
-
- if (!mcf_chan->edesc) {
- /* terminate_all called before */
- spin_unlock(&mcf_chan->vchan.lock);
- continue;
- }
-
- if (!mcf_chan->edesc->iscyclic) {
- list_del(&mcf_chan->edesc->vdesc.node);
- vchan_cookie_complete(&mcf_chan->edesc->vdesc);
- mcf_chan->edesc = NULL;
- mcf_chan->status = DMA_COMPLETE;
- mcf_chan->idle = true;
- } else {
- vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
- }
-
- if (!mcf_chan->edesc)
- fsl_edma_xfer_desc(mcf_chan);
-
- spin_unlock(&mcf_chan->vchan.lock);
+ fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
}
}
@@ -76,8 +51,7 @@ static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
if (err & BIT(ch)) {
fsl_edma_disable_request(&mcf_edma->chans[ch]);
iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
- mcf_edma->chans[ch].status = DMA_ERROR;
- mcf_edma->chans[ch].idle = true;
+ fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
}
}
@@ -172,7 +146,7 @@ static void mcf_edma_irq_free(struct platform_device *pdev,
}
static struct fsl_edma_drvdata mcf_data = {
- .version = v2,
+ .flags = FSL_EDMA_DRV_EDMA64,
.setup_irq = mcf_edma_irq_init,
};
@@ -180,9 +154,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
{
struct mcf_edma_platform_data *pdata;
struct fsl_edma_engine *mcf_edma;
- struct fsl_edma_chan *mcf_chan;
struct edma_regs *regs;
- int ret, i, len, chans;
+ int ret, i, chans;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
@@ -197,8 +170,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
chans = pdata->dma_channels;
}
- len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
- mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ mcf_edma = devm_kzalloc(&pdev->dev, struct_size(mcf_edma, chans, chans),
+ GFP_KERNEL);
if (!mcf_edma)
return -ENOMEM;
@@ -227,7 +200,9 @@ static int mcf_edma_probe(struct platform_device *pdev)
mcf_chan->dma_dir = DMA_NONE;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
- iowrite32(0x0, &regs->tcd[i].csr);
+ mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
+ + i * sizeof(struct fsl_edma_hw_tcd);
+ iowrite32(0x0, &mcf_chan->tcd->csr);
}
iowrite32(~0, regs->inth);
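The mcf-edma hunk above replaces a hand-rolled "sizeof(*p) + n * sizeof(elem)" with struct_size(); a self-contained sketch of the idiom, with hypothetical type names:

#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_chan {
	int id;
};

struct example_engine {
	unsigned int n_chans;
	struct example_chan chans[];	/* flexible array member */
};

/* struct_size(e, chans, n) equals sizeof(*e) + n * sizeof(e->chans[0]),
 * but saturates on overflow, so a bogus channel count cannot produce a
 * short allocation.
 */
static struct example_engine *example_alloc(struct device *dev, unsigned int n)
{
	struct example_engine *e;

	e = devm_kzalloc(dev, struct_size(e, chans, n), GFP_KERNEL);
	if (e)
		e->n_chans = n;
	return e;
}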
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 9ae92b8940ef..324b7387b1b9 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -18,7 +18,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 69cc61c0b262..64120767d983 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -17,7 +17,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index a1517ef1f4a0..c51dc017b48a 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -551,7 +550,6 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
rc = dma_async_device_register(&mtkd->ddev);
if (rc)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4a51fdbf5aa9..1104017320b8 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -36,11 +36,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/random.h>
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index acc4d53e4630..cfb9962417ef 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>
#include <linux/dma/mxs-dma.h>
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index e72e8c10355e..0b2f96fd8bf0 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -15,7 +15,6 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index b6e0ac8314e5..384476757c5e 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -20,8 +20,9 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "virt-dma.h"
@@ -1116,7 +1117,7 @@ static int owl_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
nr_channels, nr_requests);
- od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
+ od->devid = (uintptr_t)of_device_get_match_data(&pdev->dev);
od->nr_pchans = nr_channels;
od->nr_vchans = nr_requests;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 686c270ef710..f9b82dff3387 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -28,7 +28,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 932628b319c8..1c93864e0e4d 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2160,8 +2160,7 @@ static int gpi_probe(struct platform_device *pdev)
return -ENOMEM;
gpi_dev->dev = &pdev->dev;
- gpi_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpi_dev->regs = devm_ioremap_resource(gpi_dev->dev, gpi_dev->res);
+ gpi_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &gpi_dev->res);
if (IS_ERR(gpi_dev->regs))
return PTR_ERR(gpi_dev->regs);
gpi_dev->ee_base = gpi_dev->regs;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 344525c3a32f..834ae519c15d 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -45,12 +45,12 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -765,17 +765,15 @@ static int hidma_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trca = devm_ioremap_resource(&pdev->dev, trca_resource);
+ trca = devm_platform_get_and_ioremap_resource(pdev, 0, &trca_resource);
if (IS_ERR(trca)) {
- rc = -ENOMEM;
+ rc = PTR_ERR(trca);
goto bailout;
}
- evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- evca = devm_ioremap_resource(&pdev->dev, evca_resource);
+ evca = devm_platform_get_and_ioremap_resource(pdev, 1, &evca_resource);
if (IS_ERR(evca)) {
- rc = -ENOMEM;
+ rc = PTR_ERR(evca);
goto bailout;
}
@@ -785,7 +783,7 @@ static int hidma_probe(struct platform_device *pdev)
*/
chirq = platform_get_irq(pdev, 0);
if (chirq < 0) {
- rc = -ENODEV;
+ rc = chirq;
goto bailout;
}
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 05e96b31d871..1d675f31252b 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -176,10 +176,9 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virtaddr = devm_ioremap_resource(&pdev->dev, res);
+ virtaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(virtaddr)) {
- rc = -ENOMEM;
+ rc = PTR_ERR(virtaddr);
goto out;
}
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 9479f29692d3..f777addda8ba 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -9,6 +9,7 @@
* Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
*/
+#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -145,8 +146,8 @@ struct rz_dmac {
#define CHCFG_REQD BIT(3)
#define CHCFG_SEL(bits) ((bits) & 0x07)
#define CHCFG_MEM_COPY (0x80400008)
-#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16))
-#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12))
+#define CHCFG_FILL_DDS_MASK GENMASK(19, 16)
+#define CHCFG_FILL_SDS_MASK GENMASK(15, 12)
#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
@@ -607,13 +608,15 @@ static int rz_dmac_config(struct dma_chan *chan,
if (val == CHCFG_DS_INVALID)
return -EINVAL;
- channel->chcfg |= CHCFG_FILL_DDS(val);
+ channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
if (val == CHCFG_DS_INVALID)
return -EINVAL;
- channel->chcfg |= CHCFG_FILL_SDS(val);
+ channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
return 0;
}
@@ -947,7 +950,6 @@ static int rz_dmac_probe(struct platform_device *pdev)
dma_register_err:
of_dma_controller_free(pdev->dev.of_node);
err:
- reset_control_assert(dmac->rstc);
channel_num = i ? i - 1 : 0;
for (i = 0; i < channel_num; i++) {
struct rz_dmac_chan *channel = &dmac->channels[i];
@@ -958,6 +960,7 @@ err:
channel->lmdesc.base_dma);
}
+ reset_control_assert(dmac->rstc);
err_pm_runtime_put:
pm_runtime_put(&pdev->dev);
err_pm_disable:
@@ -971,6 +974,8 @@ static int rz_dmac_remove(struct platform_device *pdev)
struct rz_dmac *dmac = platform_get_drvdata(pdev);
unsigned int i;
+ dma_async_device_unregister(&dmac->engine);
+ of_dma_controller_free(pdev->dev.of_node);
for (i = 0; i < dmac->n_channels; i++) {
struct rz_dmac_chan *channel = &dmac->channels[i];
@@ -979,8 +984,6 @@ static int rz_dmac_remove(struct platform_device *pdev)
channel->lmdesc.base,
channel->lmdesc.base_dma);
}
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&dmac->engine);
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
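The rz-dmac hunks above move from open-coded shifts to GENMASK()/FIELD_PREP() and clear the field before re-programming it, so repeated dmaengine_slave_config() calls cannot OR stale bits together. A small sketch of that read-modify-write idiom (register and field names hypothetical):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_CHCFG_DDS_MASK	GENMASK(19, 16)	/* destination data size */

/* Replace only the DDS field of a cached channel config word. */
static u32 ex_chcfg_set_dds(u32 chcfg, u32 dds)
{
	chcfg &= ~EX_CHCFG_DDS_MASK;
	chcfg |= FIELD_PREP(EX_CHCFG_DDS_MASK, dds);
	return chcfg;
}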
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 5aafe548ca5f..00067b29e232 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
@@ -678,7 +677,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
struct sh_dmae_device *shdev;
struct dma_device *dma_dev;
- struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+ struct resource *dmars, *errirq_res, *chanirq_res;
if (pdev->dev.of_node)
pdata = of_device_get_match_data(&pdev->dev);
@@ -689,7 +688,6 @@ static int sh_dmae_probe(struct platform_device *pdev)
if (!pdata || !pdata->channel_num)
return -ENODEV;
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* DMARS area is optional */
dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
/*
@@ -709,7 +707,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
* requested with the IRQF_SHARED flag
*/
errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!chan || !errirq_res)
+ if (!errirq_res)
return -ENODEV;
shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
@@ -719,7 +717,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
dma_dev = &shdev->shdma_dev.dma_dev;
- shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ shdev->chan_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(shdev->chan_reg))
return PTR_ERR(shdev->chan_reg);
if (dmars) {
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 2b639adb48ba..168aa0bd73a0 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 825001bde42c..89e82508c133 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3590,6 +3590,10 @@ static int __init d40_probe(struct platform_device *pdev)
spin_lock_init(&base->lcla_pool.lock);
base->irq = platform_get_irq(pdev, 0);
+ if (base->irq < 0) {
+ ret = base->irq;
+ goto destroy_cache;
+ }
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
if (ret) {
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 37674029cb42..5c36811aa134 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1581,8 +1581,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
dd = &dmadev->ddev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ dmadev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dmadev->base))
return PTR_ERR(dmadev->base);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index e415bd9f4f2b..8d77e2a7939a 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -15,8 +15,10 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 1d0e9dd72ab3..0de234022c6d 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -24,7 +24,6 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index ebfd29888b2f..2469efddf540 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -14,8 +14,8 @@
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 8f67f453a492..33b101001100 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -13,7 +13,7 @@
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index cc6b91f48979..063022f9df76 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b97004036071..e557bada1510 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -8,9 +8,10 @@
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index f744ddbbbad7..7f17ee87a6dc 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -3,14 +3,15 @@
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
#define TI_XBAR_DRA7 0
#define TI_XBAR_AM335X 1
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 9ea91c640c32..aa8e2e8ac260 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -20,7 +20,6 @@
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/edma.h>
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 789193ed0386..c278d5facf7d 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -558,6 +558,9 @@ int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
}
+ if (!tx_chn->virq)
+ return -ENXIO;
+
return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index 85e00701473c..05228bf00033 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -3,6 +3,8 @@
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
+#include <linux/of.h>
+#include <linux/of_platform.h>
int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index eb4dc5fffe64..30fd2f386f36 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -20,7 +20,6 @@
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 02e1c08c596d..cf96cf915c0c 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -16,8 +16,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include "../virt-dma.h"
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 3589b4ef50b8..bb4ff8c86733 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -18,8 +18,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "dmaengine.h"
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index ac09f0e5f58d..0a3b2e22f23d 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -41,10 +41,10 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_platform.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -173,12 +173,15 @@
#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
+#define XILINX_DMA_CR_DELAY_MAX GENMASK(31, 24)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
+#define XILINX_DMA_CR_DELAY_SHIFT 24
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
+#define XILINX_DMA_BD_COMP_MASK BIT(31)
#define XILINX_DMA_COALESCE_MAX 255
-#define XILINX_DMA_NUM_DESCS 255
+#define XILINX_DMA_NUM_DESCS 512
#define XILINX_DMA_NUM_APP_WORDS 5
/* AXI CDMA Specific Registers/Offsets */
@@ -410,6 +413,7 @@ struct xilinx_dma_tx_descriptor {
* @stop_transfer: Differentiate b/w DMA IP's quiesce
* @tdest: TDEST value for mcdma
* @has_vflip: S2MM vertical flip
+ * @irq_delay: Interrupt delay timeout
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -448,6 +452,7 @@ struct xilinx_dma_chan {
int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
bool has_vflip;
+ u8 irq_delay;
};
/**
@@ -493,6 +498,7 @@ struct xilinx_dma_config {
* @s2mm_chan_id: DMA s2mm channel identifier
* @mm2s_chan_id: DMA mm2s channel identifier
* @max_buffer_len: Max buffer length
+ * @has_axistream_connected: AXI DMA connected to AXI Stream IP
*/
struct xilinx_dma_device {
void __iomem *regs;
@@ -511,6 +517,7 @@ struct xilinx_dma_device {
u32 s2mm_chan_id;
u32 mm2s_chan_id;
u32 max_buffer_len;
+ bool has_axistream_connected;
};
/* Macros */
@@ -623,6 +630,29 @@ static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
}
}
+/**
+ * xilinx_dma_get_metadata_ptr - Populate metadata pointer and payload length
+ * @tx: async transaction descriptor
+ * @payload_len: metadata payload length
+ * @max_len: metadata max length
+ * Return: The app field pointer.
+ */
+static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
+ size_t *payload_len, size_t *max_len)
+{
+ struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+ struct xilinx_axidma_tx_segment *seg;
+
+ *max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
+ seg = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ return seg->hw.app;
+}
+
+static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
+ .get_ptr = xilinx_dma_get_metadata_ptr,
+};
+
/* -----------------------------------------------------------------------------
* Descriptors and segments alloc and free
*/
@@ -1535,6 +1565,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->has_sg)
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
+ reg &= ~XILINX_DMA_CR_DELAY_MAX;
+ reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
xilinx_dma_start(chan);
@@ -1683,6 +1716,14 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
return;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ struct xilinx_axidma_tx_segment *seg;
+
+ seg = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
+ break;
+ }
if (chan->has_sg && chan->xdev->dma_config->dmatype !=
XDMA_TYPE_VDMA)
desc->residue = xilinx_dma_get_residue(chan, desc);
@@ -1816,7 +1857,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
spin_unlock(&chan->lock);
}
- tasklet_schedule(&chan->tasklet);
+ tasklet_hi_schedule(&chan->tasklet);
return IRQ_HANDLED;
}
@@ -1864,15 +1905,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
}
}
- if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
- /*
- * Device takes too long to do the transfer when user requires
- * responsiveness.
- */
- dev_dbg(chan->dev, "Inter-packet latency too long\n");
- }
-
- if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
+ if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
+ XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
spin_lock(&chan->lock);
xilinx_dma_complete_descriptor(chan);
chan->idle = true;
@@ -2221,6 +2255,9 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
segment->hw.control |= XILINX_DMA_BD_EOP;
}
+ if (chan->xdev->has_axistream_connected)
+ desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
return &desc->async_tx;
error:
@@ -2796,6 +2833,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
+ of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
+
chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
err = of_property_read_u32(node, "xlnx,datawidth", &value);
@@ -3067,6 +3106,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
}
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ xdev->has_axistream_connected =
+ of_property_read_bool(node, "xlnx,axistream-connected");
+ }
+
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
err = of_property_read_u32(node, "xlnx,num-fstores",
&num_frames);
@@ -3092,6 +3136,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
else
xdev->ext_addr = false;
+ /* Set metadata mode */
+ if (xdev->has_axistream_connected)
+ xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE;
+
/* Set the dma mask bits */
err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
if (err < 0) {
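The xilinx_dma changes above expose the per-descriptor APP words through the dmaengine descriptor-metadata interface in engine mode; a hedged client-side sketch, assuming the generic dmaengine_desc_get_metadata_ptr()/dmaengine_desc_set_metadata_len() helpers:

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Engine-mode metadata: the client borrows the descriptor's metadata
 * buffer (here the AXI DMA APP words), fills it, and records how many
 * bytes it wrote before submitting the descriptor.
 */
static int example_fill_app_words(struct dma_async_tx_descriptor *desc)
{
	size_t payload_len, max_len;
	u32 *app;

	app = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR(app))
		return PTR_ERR(app);

	app[0] = 0x1;	/* example application word */
	return dmaengine_desc_set_metadata_len(desc, sizeof(u32));
}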
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 9360f43b8e0f..bd8c3cc2eaab 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -11,8 +11,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index a3104e35412c..aa597cda0d88 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -1211,7 +1211,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* without actually having a link.
*/
create:
- device = kzalloc(sizeof(*device), GFP_KERNEL);
+ device = kzalloc(sizeof(*device), GFP_ATOMIC);
if (device == NULL)
break;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 88466b663482..f40c81534381 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -101,7 +101,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
struct fw_node *node;
- node = kzalloc(struct_size(node, ports, port_count), GFP_KERNEL);
+ node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
if (node == NULL)
return NULL;
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 26db5b8dfc1e..749868b9e80d 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -81,7 +81,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
*
* - power condition
* Set the power condition field in the START STOP UNIT commands sent by
- * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
+ * sd_mod on suspend, resume, and shutdown (if manage_system_start_stop or
+ * manage_runtime_start_stop is on).
* Some disks need this to spin down or to resume properly.
*
* - override internal blacklist
@@ -1517,8 +1518,10 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->use_10_for_rw = 1;
- if (sbp2_param_exclusive_login)
- sdev->manage_start_stop = 1;
+ if (sbp2_param_exclusive_login) {
+ sdev->manage_system_start_stop = true;
+ sdev->manage_runtime_start_stop = true;
+ }
if (sdev->type == TYPE_ROM)
sdev->use_10_for_ms = 1;
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 2109cd178ff7..121f4fc903cd 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -397,6 +397,19 @@ static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
return num_pages;
}
+static u8 ffa_memory_attributes_get(u32 func_id)
+{
+ /*
+ * For the memory lend or donate operation, if the receiver is a PE or
+ * a proxy endpoint, the owner/sender must not specify the attributes
+ */
+ if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
+ func_id == FFA_MEM_LEND)
+ return 0;
+
+ return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
+}
+
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
@@ -413,8 +426,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
mem_region->tag = args->tag;
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
- mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
- FFA_MEM_INNER_SHAREABLE;
+ mem_region->attributes = ffa_memory_attributes_get(func_id);
ep_mem_access = &mem_region->ep_mem_access[0];
for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index c0cd556fbaae..30dedd6ebfde 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -1080,6 +1080,8 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo)
return -ENOMEM;
+ pinfo->version = version;
+
ret = scmi_perf_attributes_get(ph, pinfo);
if (ret)
return ret;
@@ -1104,8 +1106,6 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
- pinfo->version = version;
-
return ph->set_priv(ph, pinfo);
}
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 49b70c70dc69..79d4254d1f9b 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1863,15 +1863,15 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
return PTR_ERR(adsp2_alg);
for (i = 0; i < n_algs; i++) {
- cs_dsp_info(dsp,
- "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
- i, be32_to_cpu(adsp2_alg[i].alg.id),
- (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
- (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
- be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
- be32_to_cpu(adsp2_alg[i].xm),
- be32_to_cpu(adsp2_alg[i].ym),
- be32_to_cpu(adsp2_alg[i].zm));
+ cs_dsp_dbg(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
+ i, be32_to_cpu(adsp2_alg[i].alg.id),
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(adsp2_alg[i].xm),
+ be32_to_cpu(adsp2_alg[i].ym),
+ be32_to_cpu(adsp2_alg[i].zm));
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
adsp2_alg[i].alg.id,
@@ -1996,14 +1996,14 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
return PTR_ERR(halo_alg);
for (i = 0; i < n_algs; i++) {
- cs_dsp_info(dsp,
- "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
- i, be32_to_cpu(halo_alg[i].alg.id),
- (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
- (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
- be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
- be32_to_cpu(halo_alg[i].xm_base),
- be32_to_cpu(halo_alg[i].ym_base));
+ cs_dsp_dbg(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
+ i, be32_to_cpu(halo_alg[i].alg.id),
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(halo_alg[i].xm_base),
+ be32_to_cpu(halo_alg[i].ym_base));
ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
halo_alg[i].alg.ver,
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 1599f1176842..ce20a60676f0 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -623,6 +623,34 @@ static __init int match_config_table(const efi_guid_t *guid,
return 0;
}
+/**
+ * reserve_unaccepted - Map and reserve unaccepted configuration table
+ * @unaccepted: Pointer to unaccepted memory table
+ *
+ * memblock_add() makes sure that the table is mapped in direct mapping. During
+ * normal boot it happens automatically because the table is allocated from
+ * usable memory. But during crashkernel boot only memory specifically reserved
+ * for crash scenario is mapped. memblock_add() forces the table to be mapped
+ * in crashkernel case.
+ *
+ * Align the range to the nearest page borders. Ranges smaller than page size
+ * are not going to be mapped.
+ *
+ * memblock_reserve() makes sure that future allocations will not touch the
+ * table.
+ */
+
+static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
+{
+ phys_addr_t start, size;
+
+ start = PAGE_ALIGN_DOWN(efi.unaccepted);
+ size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
+
+ memblock_add(start, size);
+ memblock_reserve(start, size);
+}
+
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
int count,
const efi_config_table_type_t *arch_tables)
@@ -751,11 +779,9 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
if (unaccepted) {
- unsigned long size;
if (unaccepted->version == 1) {
- size = sizeof(*unaccepted) + unaccepted->size;
- memblock_reserve(efi.unaccepted, size);
+ reserve_unaccepted(unaccepted);
} else {
efi.unaccepted = EFI_INVALID_TABLE_ADDR;
}
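reserve_unaccepted() above rounds the table range out to page borders before handing it to memblock; a worked example of that arithmetic (page size and addresses assumed for illustration):

/* Assuming 4 KiB pages and a table at physical 0x10000234 with a
 * 300-byte bitmap payload:
 *
 *   start = PAGE_ALIGN_DOWN(0x10000234)           = 0x10000000
 *   size  = PAGE_ALIGN(sizeof(*unaccepted) + 300) = 0x00001000
 *
 * so memblock_add()/memblock_reserve() cover the whole page holding the
 * table, which is what keeps it mapped in the crashkernel case.
 */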
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 92389a5481ff..a1157c2a7170 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -86,10 +86,10 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
screen_info.o efi-stub-entry.o
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o
+lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
-lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o
+lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 8c40fc89f5f9..452b7ccd330e 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -14,42 +14,6 @@
#include "efistub.h"
-/*
- * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
- * to provide space, and fail to zero it). Check for this condition by double
- * checking that the first and the last byte of the image are covered by the
- * same EFI memory map entry.
- */
-static bool check_image_region(u64 base, u64 size)
-{
- struct efi_boot_memmap *map;
- efi_status_t status;
- bool ret = false;
- int map_offset;
-
- status = efi_get_memory_map(&map, false);
- if (status != EFI_SUCCESS)
- return false;
-
- for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
- efi_memory_desc_t *md = (void *)map->map + map_offset;
- u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
-
- /*
- * Find the region that covers base, and return whether
- * it covers base+size bytes.
- */
- if (base >= md->phys_addr && base < end) {
- ret = (base + size) <= end;
- break;
- }
- }
-
- efi_bs_call(free_pool, map);
-
- return ret;
-}
-
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
@@ -59,31 +23,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
{
efi_status_t status;
unsigned long kernel_size, kernel_codesize, kernel_memsize;
- u32 phys_seed = 0;
- u64 min_kimg_align = efi_get_kimg_min_align();
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
- void *p;
-
- if (efi_nokaslr) {
- efi_info("KASLR disabled on kernel command line\n");
- } else if (efi_bs_call(handle_protocol, image_handle,
- &li_fixed_proto, &p) == EFI_SUCCESS) {
- efi_info("Image placement fixed by loader\n");
- } else {
- status = efi_get_random_bytes(sizeof(phys_seed),
- (u8 *)&phys_seed);
- if (status == EFI_NOT_FOUND) {
- efi_info("EFI_RNG_PROTOCOL unavailable\n");
- efi_nokaslr = true;
- } else if (status != EFI_SUCCESS) {
- efi_err("efi_get_random_bytes() failed (0x%lx)\n",
- status);
- efi_nokaslr = true;
- }
- }
- }
if (image->image_base != _text) {
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
@@ -98,50 +37,15 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
kernel_codesize = __inittext_end - _text;
kernel_memsize = kernel_size + (_end - _edata);
*reserve_size = kernel_memsize;
+ *image_addr = (unsigned long)_text;
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
- /*
- * If KASLR is enabled, and we have some randomness available,
- * locate the kernel at a randomized offset in physical memory.
- */
- status = efi_random_alloc(*reserve_size, min_kimg_align,
- reserve_addr, phys_seed,
- EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
- if (status != EFI_SUCCESS)
- efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
- } else {
- status = EFI_OUT_OF_RESOURCES;
- }
-
- if (status != EFI_SUCCESS) {
- if (!check_image_region((u64)_text, kernel_memsize)) {
- efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
- } else if (IS_ALIGNED((u64)_text, min_kimg_align) &&
- (u64)_end < EFI_ALLOC_LIMIT) {
- /*
- * Just execute from wherever we were loaded by the
- * UEFI PE/COFF loader if the placement is suitable.
- */
- *image_addr = (u64)_text;
- *reserve_size = 0;
- return EFI_SUCCESS;
- }
-
- status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
- ULONG_MAX, min_kimg_align,
- EFI_LOADER_CODE);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to relocate kernel\n");
- *reserve_size = 0;
- return status;
- }
- }
-
- *image_addr = *reserve_addr;
- memcpy((void *)*image_addr, _text, kernel_size);
- caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
- efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
+ status = efi_kaslr_relocate_kernel(image_addr,
+ reserve_addr, reserve_size,
+ kernel_size, kernel_codesize,
+ kernel_memsize,
+ efi_kaslr_get_phys_seed(image_handle));
+ if (status != EFI_SUCCESS)
+ return status;
return EFI_SUCCESS;
}
@@ -159,3 +63,8 @@ unsigned long primary_entry_offset(void)
*/
return (char *)primary_entry - _text;
}
+
+void efi_icache_sync(unsigned long start, unsigned long end)
+{
+ caches_clean_inval_pou(start, end);
+}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 9823f6fb3e01..212687c30d79 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1133,6 +1133,14 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
void efi_remap_image(unsigned long image_base, unsigned alloc_size,
unsigned long code_size);
+efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long kernel_size,
+ unsigned long kernel_codesize,
+ unsigned long kernel_memsize,
+ u32 phys_seed);
+u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle);
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
new file mode 100644
index 000000000000..62d63f7a2645
--- /dev/null
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures to deal with physical address space randomization.
+ */
+#include <linux/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_kaslr_get_phys_seed() - Get random seed for physical kernel KASLR
+ * @image_handle: Handle to the image
+ *
+ * If KASLR has not been disabled, obtain a random seed using EFI_RNG_PROTOCOL
+ * that will be used to randomize the kernel's physical placement.
+ *
+ * Return: the random seed on success, 0 otherwise
+ */
+u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
+{
+ efi_status_t status;
+ u32 phys_seed;
+ efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
+ void *p;
+
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return 0;
+
+ if (efi_nokaslr) {
+ efi_info("KASLR disabled on kernel command line\n");
+ } else if (efi_bs_call(handle_protocol, image_handle,
+ &li_fixed_proto, &p) == EFI_SUCCESS) {
+ efi_info("Image placement fixed by loader\n");
+ } else {
+ status = efi_get_random_bytes(sizeof(phys_seed),
+ (u8 *)&phys_seed);
+ if (status == EFI_SUCCESS) {
+ return phys_seed;
+ } else if (status == EFI_NOT_FOUND) {
+ efi_info("EFI_RNG_PROTOCOL unavailable\n");
+ efi_nokaslr = true;
+ } else if (status != EFI_SUCCESS) {
+ efi_err("efi_get_random_bytes() failed (0x%lx)\n",
+ status);
+ efi_nokaslr = true;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by double
+ * checking that the first and the last byte of the image are covered by the
+ * same EFI memory map entry.
+ */
+static bool check_image_region(u64 base, u64 size)
+{
+ struct efi_boot_memmap *map;
+ efi_status_t status;
+ bool ret = false;
+ int map_offset;
+
+ status = efi_get_memory_map(&map, false);
+ if (status != EFI_SUCCESS)
+ return false;
+
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
+ u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+ /*
+ * Find the region that covers base, and return whether
+ * it covers base+size bytes.
+ */
+ if (base >= md->phys_addr && base < end) {
+ ret = (base + size) <= end;
+ break;
+ }
+ }
+
+ efi_bs_call(free_pool, map);
+
+ return ret;
+}
+
+/**
+ * efi_kaslr_relocate_kernel() - Relocate the kernel (randomly if KASLR is enabled)
+ * @image_addr: Pointer to the current kernel location
+ * @reserve_addr: Pointer to the relocated kernel location
+ * @reserve_size: Size of the relocated kernel
+ * @kernel_size: Size of the text + data
+ * @kernel_codesize: Size of the text
+ * @kernel_memsize: Size of the text + data + bss
+ * @phys_seed: Random seed used for the relocation
+ *
+ * If KASLR is not enabled, this function relocates the kernel to a fixed
+ * address (or leaves it at its current location). If KASLR is enabled, the
+ * kernel's physical location is randomized using the given seed.
+ *
+ * Return: status code, EFI_SUCCESS if relocation is successful
+ */
+efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long kernel_size,
+ unsigned long kernel_codesize,
+ unsigned long kernel_memsize,
+ u32 phys_seed)
+{
+ efi_status_t status;
+ u64 min_kimg_align = efi_get_kimg_min_align();
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
+ /*
+ * If KASLR is enabled, and we have some randomness available,
+ * locate the kernel at a randomized offset in physical memory.
+ */
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+ reserve_addr, phys_seed,
+ EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
+ } else {
+ status = EFI_OUT_OF_RESOURCES;
+ }
+
+ if (status != EFI_SUCCESS) {
+ if (!check_image_region(*image_addr, kernel_memsize)) {
+ efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+ } else if (IS_ALIGNED(*image_addr, min_kimg_align) &&
+ (unsigned long)_end < EFI_ALLOC_LIMIT) {
+ /*
+ * Just execute from wherever we were loaded by the
+ * UEFI PE/COFF loader if the placement is suitable.
+ */
+ *reserve_size = 0;
+ return EFI_SUCCESS;
+ }
+
+ status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
+ ULONG_MAX, min_kimg_align,
+ EFI_LOADER_CODE);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+ *reserve_size = 0;
+ return status;
+ }
+ }
+
+ memcpy((void *)*reserve_addr, (void *)*image_addr, kernel_size);
+ *image_addr = *reserve_addr;
+ efi_icache_sync(*image_addr, *image_addr + kernel_codesize);
+ efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
+
+ return status;
+}
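For readers skimming the diff, here is a condensed, illustrative sketch of how an architecture stub is expected to wire the two new helpers together. It is distilled from the arm64 and riscv hunks in this series and is not an additional file; the symbol names (_text, __inittext_end, _edata, _end) follow the arm64 linker script, while riscv uses _start and __init_text_end instead.

/* Illustrative only -- condensed from the arm64/riscv stub changes in this diff. */
efi_status_t handle_kernel_image_sketch(unsigned long *image_addr,
					unsigned long *image_size,
					unsigned long *reserve_addr,
					unsigned long *reserve_size,
					efi_handle_t image_handle)
{
	unsigned long kernel_size     = _edata - _text;		/* text + data */
	unsigned long kernel_codesize = __inittext_end - _text;	/* text only */
	unsigned long kernel_memsize  = kernel_size + (_end - _edata);	/* + bss */

	*image_addr = (unsigned long)_text;
	*image_size = kernel_memsize;
	*reserve_size = kernel_memsize;

	/*
	 * The seed is 0 when KASLR is off or EFI_RNG_PROTOCOL is absent, in
	 * which case the helper falls back to an aligned allocation, or to
	 * executing in place if the current load address already qualifies.
	 */
	return efi_kaslr_relocate_kernel(image_addr, reserve_addr, reserve_size,
					 kernel_size, kernel_codesize,
					 kernel_memsize,
					 efi_kaslr_get_phys_seed(image_handle));
}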
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 145c9f0ba217..c96d6dcee86c 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -30,32 +30,29 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
efi_loaded_image_t *image,
efi_handle_t image_handle)
{
- unsigned long kernel_size = 0;
- unsigned long preferred_addr;
+ unsigned long kernel_size, kernel_codesize, kernel_memsize;
efi_status_t status;
kernel_size = _edata - _start;
+ kernel_codesize = __init_text_end - _start;
+ kernel_memsize = kernel_size + (_end - _edata);
*image_addr = (unsigned long)_start;
- *image_size = kernel_size + (_end - _edata);
-
- /*
- * RISC-V kernel maps PAGE_OFFSET virtual address to the same physical
- * address where kernel is booted. That's why kernel should boot from
- * as low as possible to avoid wastage of memory. Currently, dram_base
- * is occupied by the firmware. So the preferred address for kernel to
- * boot is next aligned address. If preferred address is not available,
- * relocate_kernel will fall back to efi_low_alloc_above to allocate
- * lowest possible memory region as long as the address and size meets
- * the alignment constraints.
- */
- preferred_addr = EFI_KIMG_PREFERRED_ADDRESS;
- status = efi_relocate_kernel(image_addr, kernel_size, *image_size,
- preferred_addr, efi_get_kimg_min_align(),
- 0x0);
+ *image_size = kernel_memsize;
+ *reserve_size = *image_size;
+ status = efi_kaslr_relocate_kernel(image_addr,
+ reserve_addr, reserve_size,
+ kernel_size, kernel_codesize, kernel_memsize,
+ efi_kaslr_get_phys_seed(image_handle));
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");
*image_size = 0;
}
+
return status;
}
+
+void efi_icache_sync(unsigned long start, unsigned long end)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c
index ca61f4733ea5..9a655f30ba47 100644
--- a/drivers/firmware/efi/libstub/unaccepted_memory.c
+++ b/drivers/firmware/efi/libstub/unaccepted_memory.c
@@ -62,7 +62,7 @@ efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
bitmap_size = DIV_ROUND_UP(unaccepted_end - unaccepted_start,
EFI_UNACCEPTED_UNIT_SIZE * BITS_PER_BYTE);
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*unaccepted_table) + bitmap_size,
(void **)&unaccepted_table);
if (status != EFI_SUCCESS) {
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 3dba590a2a95..508eab346fc6 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -114,6 +114,7 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
dsp_chan->idx = i % 2;
dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(dsp_chan->ch)) {
+ kfree(dsp_chan->name);
ret = PTR_ERR(dsp_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index da33bbbdacb9..58f107194fda 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -973,7 +973,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
else if (param == PIN_CONFIG_BIAS_DISABLE ||
param == PIN_CONFIG_BIAS_PULL_DOWN ||
param == PIN_CONFIG_DRIVE_STRENGTH)
- return pinctrl_gpio_set_config(offset, config);
+ return pinctrl_gpio_set_config(chip->base + offset, config);
else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index 2b9b7be9b8fd..01c0fd0a9d8c 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -352,6 +352,7 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
+ pmic_eic->chip.can_sleep = true;
irq = &pmic_eic->chip.irq;
gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 7e9f7a32d3ee..cae9661862fe 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -237,6 +237,7 @@ static bool pxa_gpio_has_pinctrl(void)
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
+ case MMP_GPIO:
return false;
default:
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 271db3639a78..44bf1709a648 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -685,52 +686,32 @@ gpio_sim_device_config_live_show(struct config_item *item, char *page)
return sprintf(page, "%c\n", live ? '1' : '0');
}
-static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
- unsigned int *line_names_size)
+static unsigned int gpio_sim_get_line_names_size(struct gpio_sim_bank *bank)
{
- unsigned int max_offset = 0;
- bool has_line_names = false;
struct gpio_sim_line *line;
- char **line_names;
+ unsigned int size = 0;
list_for_each_entry(line, &bank->line_list, siblings) {
- if (line->offset >= bank->num_lines)
+ if (!line->name || (line->offset >= bank->num_lines))
continue;
- if (line->name) {
- if (line->offset > max_offset)
- max_offset = line->offset;
-
- /*
- * max_offset can stay at 0 so it's not an indicator
- * of whether line names were configured at all.
- */
- has_line_names = true;
- }
+ size = max(size, line->offset + 1);
}
- if (!has_line_names)
- /*
- * This is not an error - NULL means, there are no line
- * names configured.
- */
- return NULL;
-
- *line_names_size = max_offset + 1;
+ return size;
+}
- line_names = kcalloc(*line_names_size, sizeof(*line_names), GFP_KERNEL);
- if (!line_names)
- return ERR_PTR(-ENOMEM);
+static void
+gpio_sim_set_line_names(struct gpio_sim_bank *bank, char **line_names)
+{
+ struct gpio_sim_line *line;
list_for_each_entry(line, &bank->line_list, siblings) {
- if (line->offset >= bank->num_lines)
+ if (!line->name || (line->offset >= bank->num_lines))
continue;
- if (line->name && (line->offset <= max_offset))
- line_names[line->offset] = line->name;
+ line_names[line->offset] = line->name;
}
-
- return line_names;
}
static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
@@ -834,7 +815,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
struct fwnode_handle *parent)
{
struct property_entry properties[GPIO_SIM_PROP_MAX];
- unsigned int prop_idx = 0, line_names_size = 0;
+ unsigned int prop_idx = 0, line_names_size;
char **line_names __free(kfree) = NULL;
memset(properties, 0, sizeof(properties));
@@ -845,14 +826,19 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
bank->label);
- line_names = gpio_sim_make_line_names(bank, &line_names_size);
- if (IS_ERR(line_names))
- return ERR_CAST(line_names);
+ line_names_size = gpio_sim_get_line_names_size(bank);
+ if (line_names_size) {
+ line_names = kcalloc(line_names_size, sizeof(*line_names),
+ GFP_KERNEL);
+ if (!line_names)
+ return ERR_PTR(-ENOMEM);
+
+ gpio_sim_set_line_names(bank, line_names);
- if (line_names)
properties[prop_idx++] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
"gpio-line-names",
line_names, line_names_size);
+ }
return fwnode_create_software_node(properties, parent);
}
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 78f8790168ae..f96d260a4a19 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -195,7 +195,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
- return ret;
+ goto err_remove_domain;
gc = tb10x_gpio->domain->gc->gc[0];
gc->reg_base = tb10x_gpio->base;
@@ -209,6 +209,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
}
return 0;
+
+err_remove_domain:
+ irq_domain_remove(tb10x_gpio->domain);
+ return ret;
}
static int tb10x_gpio_remove(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index bbd9e9191199..fad979797486 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -43,9 +43,10 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
unsigned offset, bool enabled)
{
struct timbgpio *tgpio = gpiochip_get_data(gpio);
+ unsigned long flags;
u32 reg;
- spin_lock(&tgpio->lock);
+ spin_lock_irqsave(&tgpio->lock, flags);
reg = ioread32(tgpio->membase + offset);
if (enabled)
@@ -54,7 +55,7 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
reg &= ~(1 << index);
iowrite32(reg, tgpio->membase + offset);
- spin_unlock(&tgpio->lock);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
return 0;
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 0a7264aabe48..324e942c0650 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -575,6 +575,26 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
+static int zynq_gpio_irq_reqres(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(chip->parent);
+ if (ret < 0)
+ return ret;
+
+ return gpiochip_reqres_irq(chip, d->hwirq);
+}
+
+static void zynq_gpio_irq_relres(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ gpiochip_relres_irq(chip, d->hwirq);
+ pm_runtime_put(chip->parent);
+}
+
/* irq chip descriptor */
static const struct irq_chip zynq_gpio_level_irqchip = {
.name = DRIVER_NAME,
@@ -584,9 +604,10 @@ static const struct irq_chip zynq_gpio_level_irqchip = {
.irq_unmask = zynq_gpio_irq_unmask,
.irq_set_type = zynq_gpio_set_irq_type,
.irq_set_wake = zynq_gpio_set_wake,
+ .irq_request_resources = zynq_gpio_irq_reqres,
+ .irq_release_resources = zynq_gpio_irq_relres,
.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static const struct irq_chip zynq_gpio_edge_irqchip = {
@@ -597,8 +618,9 @@ static const struct irq_chip zynq_gpio_edge_irqchip = {
.irq_unmask = zynq_gpio_irq_unmask,
.irq_set_type = zynq_gpio_set_irq_type,
.irq_set_wake = zynq_gpio_set_wake,
+ .irq_request_resources = zynq_gpio_irq_reqres,
+ .irq_release_resources = zynq_gpio_irq_relres,
.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index ab9ef1c20349..3caa020391c7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -136,7 +136,7 @@ config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
- default y
+ default FB
help
Choose this option if you have a need for the legacy fbdev
support. Note that this support also provides the linux console
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dc2d53081e80..a79d53bdbe13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1293,7 +1293,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
-bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index df633e9ce920..25d5fda5b243 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -442,9 +442,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
mem_info->local_mem_size_public,
mem_info->local_mem_size_private);
- if (amdgpu_sriov_vf(adev))
- mem_info->mem_clk_max = adev->clock.default_mclk / 100;
- else if (adev->pm.dpm_enabled) {
+ if (adev->pm.dpm_enabled) {
if (amdgpu_emu_mode == 1)
mem_info->mem_clk_max = 0;
else
@@ -463,9 +461,7 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
/* the sclk is in quantas of 10kHz */
- if (amdgpu_sriov_vf(adev))
- return adev->clock.default_sclk / 100;
- else if (adev->pm.dpm_enabled)
+ if (adev->pm.dpm_enabled)
return amdgpu_dpm_get_sclk(adev, false) / 100;
else
return 100;
@@ -482,7 +478,7 @@ void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *c
cu_info->cu_active_number = acu_info.number;
cu_info->cu_ao_mask = acu_info.ao_cu_mask;
memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
- sizeof(acu_info.bitmap));
+ sizeof(cu_info->cu_bitmap));
cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index f1f2c24de081..69810b3f1c63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -980,8 +980,7 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst)
+ uint32_t *reg_data)
{
*reg_data = wait_times;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
index ecaead24e8c9..67bcaa3d4226 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -55,5 +55,4 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst);
+ uint32_t *reg_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index fa5ee96f8845..3c45a188b701 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -1103,8 +1103,7 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst)
+ uint32_t *reg_data)
{
*reg_data = wait_times;
@@ -1120,8 +1119,7 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
SCH_WAVE,
grace_period);
- *reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
- mmCP_IQ_WAIT_TIME2);
+ *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 936e501908ce..ce424615f59b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -100,5 +100,4 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst);
+ uint32_t *reg_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 73ee14f7a9a4..dce9e7d5e4ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1776,7 +1776,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct atom_context *ctx = adev->mode_info.atom_context;
- return sysfs_emit(buf, "%s\n", ctx->vbios_ver_str);
+ return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 835980e94b9e..fb2681dd6b33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -217,6 +217,7 @@ union umc_info {
struct atom_umc_info_v3_1 v31;
struct atom_umc_info_v3_2 v32;
struct atom_umc_info_v3_3 v33;
+ struct atom_umc_info_v4_0 v40;
};
union vram_info {
@@ -508,9 +509,8 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size, &frev, &crev, &data_offset)) {
+ umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
if (frev == 3) {
- umc_info = (union umc_info *)
- (mode_info->atom_context->bios + data_offset);
switch (crev) {
case 1:
umc_config = le32_to_cpu(umc_info->v31.umc_config);
@@ -533,6 +533,20 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
/* unsupported crev */
return false;
}
+ } else if (frev == 4) {
+ switch (crev) {
+ case 0:
+ umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
+ ecc_default_enabled =
+ (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
+ break;
+ default:
+ /* unsupported crev */
+ return false;
+ }
+ } else {
+ /* unsupported frev */
+ return false;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 49dd9aa8da70..efdb1c48f431 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,7 +127,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
{
struct drm_gem_object *gobj;
unsigned long size;
- int r;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@@ -137,23 +136,14 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
drm_gem_object_put(gobj);
size = amdgpu_bo_size(p->uf_bo);
- if (size != PAGE_SIZE || (data->offset + 8) > size) {
- r = -EINVAL;
- goto error_unref;
- }
+ if (size != PAGE_SIZE || data->offset > (size - 8))
+ return -EINVAL;
- if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) {
- r = -EINVAL;
- goto error_unref;
- }
+ if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
+ return -EINVAL;
*offset = data->offset;
-
return 0;
-
-error_unref:
- amdgpu_bo_unref(&p->uf_bo);
- return r;
}
static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e77f048c99d8..2b8356699f23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -885,13 +885,20 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
*/
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
+ int ret;
+
amdgpu_asic_pre_asic_init(adev);
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
- adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
- return amdgpu_atomfirmware_asic_init(adev, true);
- else
+ adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
+ amdgpu_psp_wait_for_bootloader(adev);
+ ret = amdgpu_atomfirmware_asic_init(adev, true);
+ return ret;
+ } else {
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ }
+
+ return 0;
}
/**
@@ -1238,32 +1245,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
}
/*
- * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
- * Disable S/G on such systems until we have a proper fix.
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
- */
-bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
-{
- switch (amdgpu_sg_display) {
- case -1:
- break;
- case 0:
- return false;
- case 1:
- return true;
- default:
- return false;
- }
- if ((totalram_pages() << (PAGE_SHIFT - 10)) +
- (adev->gmc.real_vram_size / 1024) >= 64000000) {
- DRM_WARN("Disabling S/G due to >=64GB RAM\n");
- return false;
- }
- return true;
-}
-
-/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
@@ -2112,7 +2093,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->flags |= AMD_IS_PX;
if (!(adev->flags & AMD_IS_APU)) {
- parent = pci_upstream_bridge(adev->pdev);
+ parent = pcie_find_root_port(adev->pdev);
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}
@@ -4694,9 +4675,12 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
}
if (ret)
- dev_err(adev->dev, "GPU mode1 reset failed\n");
+ goto mode1_reset_failed;
amdgpu_device_load_pci_state(adev->pdev);
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto mode1_reset_failed;
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -4707,7 +4691,17 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
udelay(1);
}
+ if (i >= adev->usec_timeout) {
+ ret = -ETIMEDOUT;
+ goto mode1_reset_failed;
+ }
+
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+ return 0;
+
+mode1_reset_failed:
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
return ret;
}
@@ -4849,7 +4843,7 @@ static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
ktime_get_ts64(&adev->reset_time);
- dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+ dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_NOWAIT,
amdgpu_devcoredump_read, amdgpu_devcoredump_free);
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 74ffe6581c85..7d5e7ad28ba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1390,6 +1390,7 @@ union gc_info {
struct gc_info_v1_1 v1_1;
struct gc_info_v1_2 v1_2;
struct gc_info_v2_0 v2;
+ struct gc_info_v2_1 v2_1;
};
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@@ -1465,6 +1466,15 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+ if (gc_info->v2.header.version_minor == 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
+ adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
+ }
break;
default:
dev_err(adev->dev,
@@ -1478,6 +1488,7 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
union mall_info {
struct mall_info_v1_0 v1;
+ struct mall_info_v2_0 v2;
};
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
@@ -1518,6 +1529,10 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
adev->gmc.mall_size = mall_size;
adev->gmc.m_half_use = half_use;
break;
+ case 2:
+ mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
+ adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
+ break;
default:
dev_err(adev->dev,
"Unhandled MALL info table %d.%d\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index d20dd3f852fc..363e6a2cad8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -532,11 +534,29 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
return true;
}
+static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips, unsigned int num_clips)
+{
+
+ if (file)
+ return -ENOSYS;
+
+ return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
+ num_clips);
+}
+
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = amdgpu_dirtyfb
+};
+
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1139,7 +1159,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_framebuffer_init(dev, &rfb->base,
+ &amdgpu_fb_funcs_atomic);
+ else
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 9c66d98af6d8..7cd0dfaeee20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -170,6 +170,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
csum += pia[size - 1];
if (csum) {
DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
+ kfree(pia);
return -EIO;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index a4ff515ce896..0ca95c4d4bfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -43,6 +43,7 @@
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
#define AMDGPU_MAX_GC_INSTANCES 8
+#define KGD_MAX_QUEUES 128
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
@@ -241,6 +242,9 @@ struct amdgpu_gfx_config {
uint32_t gc_gl1c_per_sa;
uint32_t gc_gl1c_size_per_instance;
uint32_t gc_gl2c_per_gpu;
+ uint32_t gc_tcp_size_per_cu;
+ uint32_t gc_num_cu_per_sqc;
+ uint32_t gc_tcc_size;
};
struct amdgpu_cu_info {
@@ -254,7 +258,7 @@ struct amdgpu_cu_info {
uint32_t number;
uint32_t ao_cu_mask;
uint32_t ao_cu_bitmap[4][4];
- uint32_t bitmap[4][4];
+ uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
};
struct amdgpu_gfx_ras {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 99f4df133ed3..d30dc0b718c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -839,7 +839,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
sizeof(adev->gfx.cu_info.ao_cu_bitmap));
memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
- sizeof(adev->gfx.cu_info.bitmap));
+ sizeof(dev_info->cu_bitmap));
dev_info->vram_type = adev->gmc.vram_type;
dev_info->vram_bit_width = adev->gmc.vram_width;
dev_info->vce_harvest_config = adev->vce.harvest_config;
@@ -940,12 +940,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct atom_context *atom_context;
atom_context = adev->mode_info.atom_context;
- memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
- memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
- vbios_info.version = atom_context->version;
- memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
- sizeof(atom_context->vbios_ver_str));
- memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
+ if (atom_context) {
+ memcpy(vbios_info.name, atom_context->name,
+ sizeof(atom_context->name));
+ memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
+ sizeof(atom_context->vbios_pn));
+ vbios_info.version = atom_context->version;
+ memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
+ sizeof(atom_context->vbios_ver_str));
+ memcpy(vbios_info.date, atom_context->date,
+ sizeof(atom_context->date));
+ }
return copy_to_user(out, &vbios_info,
min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8fdca54bb8a1..429ef212c1f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2078,6 +2078,17 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
}
/* SECUREDISPLAY end */
+int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+ int ret = 0;
+
+ if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
+ ret = psp->funcs->wait_for_bootloader(psp);
+
+ return ret;
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 3384eb94fde0..3e67ed63e638 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -109,6 +109,7 @@ enum psp_reg_prog_id {
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
+ int (*wait_for_bootloader)(struct psp_context *psp);
int (*bootloader_load_kdb)(struct psp_context *psp);
int (*bootloader_load_spl)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
@@ -533,4 +534,6 @@ int psp_spatial_partition(struct psp_context *psp, int mode);
int is_psp_fw_valid(struct psp_bin_desc bin);
+int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 7689395e44fd..163445baa4fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -764,7 +764,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
union ta_ras_cmd_input *info;
- int ret = 0;
+ int ret;
if (!con)
return -EINVAL;
@@ -773,7 +773,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (enable &&
head->block != AMDGPU_RAS_BLOCK__GFX &&
!amdgpu_ras_is_feature_allowed(adev, head))
- goto out;
+ return 0;
/* Only enable gfx ras feature from host side */
if (head->block == AMDGPU_RAS_BLOCK__GFX &&
@@ -801,16 +801,17 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
enable ? "enable":"disable",
get_ras_block_str(head),
amdgpu_ras_is_poison_mode_supported(adev), ret);
- goto out;
+ kfree(info);
+ return ret;
}
+
+ kfree(info);
}
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
-out:
- if (head->block == AMDGPU_RAS_BLOCK__GFX)
- kfree(info);
- return ret;
+
+ return 0;
}
/* Only used in device probe stage and called only once. */
@@ -1052,7 +1053,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
info->ce_count = obj->err_data.ce_count;
if (err_data.ce_count) {
- if (adev->smuio.funcs &&
+ if (!adev->aid_mask &&
+ adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
dev_info(adev->dev, "socket: %d, die: %d "
@@ -1072,7 +1074,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
}
}
if (err_data.ue_count) {
- if (adev->smuio.funcs &&
+ if (!adev->aid_mask &&
+ adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
dev_info(adev->dev, "socket: %d, die: %d "
@@ -2399,6 +2402,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
return true;
default:
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 4764d2171f92..595d5e535aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -158,9 +158,10 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 7): /* Sienna cichlid */
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 2): /* Aldebaran */
- case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
return true;
+ case IP_VERSION(13, 0, 6):
+ return (adev->gmc.is_app_apu) ? false : true;
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index c6b4337eb20c..10df731998b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -81,7 +81,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
unsigned int size)
{
struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
- GFP_KERNEL, true, 0);
+ GFP_KERNEL, false, 0);
if (IS_ERR(sa)) {
*sa_bo = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 0aee9c8288a2..9032d7a24d7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -9449,7 +9449,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
- cu_info->bitmap[i][j] = bitmap;
+ cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 5c3db694afa8..762d7a19f1be 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -6368,7 +6368,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
* SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
* SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
*/
- cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;
+ cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index da6caff78c22..34f9211b2679 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3577,7 +3577,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
gfx_v6_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v6_0_get_cu_enabled(adev);
- cu_info->bitmap[i][j] = bitmap;
+ cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 90b034b173c1..c2faf6b4c2fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -5119,7 +5119,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
gfx_v7_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
- cu_info->bitmap[i][j] = bitmap;
+ cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 51c1745c8369..885ebd703260 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -7121,7 +7121,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
gfx_v8_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
- cu_info->bitmap[i][j] = bitmap;
+ cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (bitmap & mask) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 458faf657042..fd61574a737c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1499,7 +1499,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
- if (cu_info->bitmap[i][j] & mask) {
+ if (cu_info->bitmap[0][i][j] & mask) {
if (counter == pg_always_on_cu_num)
WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
if (counter < always_on_cu_num)
@@ -7233,7 +7233,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
* SE6,SH0 --> bitmap[2][1]
* SE7,SH0 --> bitmap[3][1]
*/
- cu_info->bitmap[i % 4][j + i / 4] = bitmap;
+ cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (bitmap & mask) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 57ed4e5c294c..18ce5fe45f6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -203,6 +203,9 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
if (adev->rev_id == 0) {
WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
REDUCE_FIFO_DEPTH_BY_2, 2);
+ } else {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
+ SPARE, 0x1);
}
}
}
@@ -860,11 +863,15 @@ static int gfx_v9_4_3_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_gfx_sysfs_init(adev);
+ r = amdgpu_gfx_ras_sw_init(adev);
if (r)
return r;
- return amdgpu_gfx_ras_sw_init(adev);
+
+ if (!amdgpu_sriov_vf(adev))
+ r = amdgpu_gfx_sysfs_init(adev);
+
+ return r;
}
static int gfx_v9_4_3_sw_fini(void *handle)
@@ -885,7 +892,8 @@ static int gfx_v9_4_3_sw_fini(void *handle)
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
- amdgpu_gfx_sysfs_fini(adev);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_gfx_sysfs_fini(adev);
return 0;
}
@@ -2219,15 +2227,6 @@ static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
WREG32_SOC15(GC, GET_INST(GC, xcc_id),
regRLC_CGTT_MGCG_OVERRIDE, data);
- def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL);
-
- if (enable)
- data &= ~RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
- else
- data |= RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
-
- if (def != data)
- WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL, data);
}
static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
@@ -4048,7 +4047,8 @@ static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
uint32_t i;
uint32_t data;
- data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
+ data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
+ data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
if (amdgpu_watchdog_timer.timeout_fatal_disable &&
@@ -4259,7 +4259,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
}
static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
- u32 bitmap)
+ u32 bitmap, int xcc_id)
{
u32 data;
@@ -4269,15 +4269,15 @@ static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
- WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
}
-static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
+static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
{
u32 data, mask;
- data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG);
- data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG);
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
+ data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
@@ -4290,7 +4290,7 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info)
{
- int i, j, k, counter, active_cu_number = 0;
+ int i, j, k, counter, xcc_id, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
unsigned disable_masks[4 * 4];
@@ -4309,46 +4309,38 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
adev->gfx.config.max_sh_per_se);
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- mask = 1;
- ao_bitmap = 0;
- counter = 0;
- gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0);
- gfx_v9_4_3_set_user_cu_inactive_bitmap(
- adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
- bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
-
- /*
- * The bitmap(and ao_cu_bitmap) in cu_info structure is
- * 4x4 size array, and it's usually suitable for Vega
- * ASICs which has 4*2 SE/SH layout.
- * But for Arcturus, SE/SH layout is changed to 8*1.
- * To mostly reduce the impact, we make it compatible
- * with current bitmap array as below:
- * SE4,SH0 --> bitmap[0][1]
- * SE5,SH0 --> bitmap[1][1]
- * SE6,SH0 --> bitmap[2][1]
- * SE7,SH0 --> bitmap[3][1]
- */
- cu_info->bitmap[i % 4][j + i / 4] = bitmap;
-
- for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
- if (bitmap & mask) {
- if (counter < adev->gfx.config.max_cu_per_sh)
- ao_bitmap |= mask;
- counter++;
+ for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ ao_bitmap = 0;
+ counter = 0;
+ gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
+ gfx_v9_4_3_set_user_cu_inactive_bitmap(
+ adev,
+ disable_masks[i * adev->gfx.config.max_sh_per_se + j],
+ xcc_id);
+ bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
+
+ cu_info->bitmap[xcc_id][i][j] = bitmap;
+
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (bitmap & mask) {
+ if (counter < adev->gfx.config.max_cu_per_sh)
+ ao_bitmap |= mask;
+ counter++;
+ }
+ mask <<= 1;
}
- mask <<= 1;
+ active_cu_number += counter;
+ if (i < 2 && j < 2)
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
- active_cu_number += counter;
- if (i < 2 && j < 2)
- ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
- cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
}
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
}
- gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
- 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 15612915bb6c..1de79d660285 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -360,8 +360,10 @@ static int jpeg_v4_0_3_hw_fini(void *handle)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index d5ed9e0e1a5f..e5b5b0f4940f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -345,6 +345,9 @@ static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
}
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 9ea072374cb7..f85eec05d218 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -437,6 +437,24 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
XCC_DOORBELL_FENCE__SHUB_SLV_MODE_MASK);
}
+
+ if (!amdgpu_sriov_vf(adev)) {
+ u32 baco_cntl;
+ for_each_inst(i, adev->aid_mask) {
+ baco_cntl = RREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL);
+ if (baco_cntl & (BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
+ BIF_BX0_BACO_CNTL__BACO_EN_MASK)) {
+ baco_cntl &= ~(
+ BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
+ BIF_BX0_BACO_CNTL__BACO_EN_MASK);
+ dev_dbg(adev->dev,
+ "Unsetting baco dummy mode %x",
+ baco_cntl);
+ WREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL,
+ baco_cntl);
+ }
+ }
+ }
}
static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 10b17bd5aebe..469eed084976 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -133,12 +133,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
-static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ int retry_loop, ret;
- int ret;
- int retry_loop;
+ for (retry_loop = 0; retry_loop < 70; retry_loop++) {
+ /* Wait for bootloader to signify that it is
+  * ready, having bit 31 of C2PMSG_33 set to 1 */
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33),
+ 0x80000000, 0xffffffff, false);
+
+ if (ret == 0)
+ break;
+ }
+
+ if (ret)
+ dev_warn(adev->dev, "Bootloader wait timed out");
+
+ return ret;
+}
+
+static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int retry_loop, ret;
/* Wait for bootloader to signify that it is ready having bit 31 of
* C2PMSG_35 set to 1. All other bits are expected to be cleared.
@@ -157,6 +177,19 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
return ret;
}
+static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) {
+ psp_v13_0_wait_for_vmbx_ready(psp);
+
+ return psp_v13_0_wait_for_bootloader(psp);
+ }
+
+ return 0;
+}
+
static int psp_v13_0_bootloader_load_component(struct psp_context *psp,
struct psp_bin_desc *bin_desc,
enum psp_bootloader_cmd bl_cmd)
@@ -714,6 +747,7 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
+ .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
.bootloader_load_kdb = psp_v13_0_bootloader_load_kdb,
.bootloader_load_spl = psp_v13_0_bootloader_load_spl,
.bootloader_load_sysdrv = psp_v13_0_bootloader_load_sysdrv,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c45721ca916e..f5be40d7ba36 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -559,8 +559,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
*/
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
+ else if (!(adev->flags & AMD_IS_APU))
+ return AMD_RESET_METHOD_MODE1;
else
- return AMD_RESET_METHOD_NONE;
+ return AMD_RESET_METHOD_MODE2;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 40d23738ee4e..8b2ff2b281b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -766,7 +766,7 @@ static int soc21_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
- if (adev->nbio.funcs->remap_hdp_registers)
+ if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
adev->nbio.funcs->enable_doorbell_aperture(adev, true);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 86fb7ac7982a..f76b7aee5c0a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -2087,7 +2087,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
cu->num_simd_per_cu = cu_info.simd_per_cu;
- cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
+ cu->num_simd_cores = cu_info.simd_per_cu *
+ (cu_info.cu_active_number / kdev->kfd->num_nodes);
cu->max_waves_simd = cu_info.max_waves_per_simd;
cu->wave_front_size = cu_info.wave_front_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
index 387a8ef49385..74c2d7a0d628 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -79,6 +79,10 @@ struct crat_header {
#define CRAT_SUBTYPE_IOLINK_AFFINITY 5
#define CRAT_SUBTYPE_MAX 6
+/*
+ * Do not change the value of CRAT_SIBLINGMAP_SIZE from 32
+ * as it breaks the ABI.
+ */
#define CRAT_SIBLINGMAP_SIZE 32
/*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b166f30f083e..0d3d538b64eb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -216,7 +216,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
if (q->wptr_bo) {
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
- queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
+ queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
}
queue_input.is_kfd_process = 1;
@@ -1677,8 +1677,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm->dev->kfd2kgd->build_grace_period_packet_info(
dqm->dev->adev, dqm->wait_times,
grace_period, &reg_offset,
- &dqm->wait_times,
- ffs(dqm->dev->xcc_mask) - 1);
+ &dqm->wait_times);
}
dqm_unlock(dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index c2e0b79dcc6d..7b38537c7c99 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -162,6 +162,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
return NULL;
*doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx);
+ inx *= 2;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
@@ -176,6 +177,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
unsigned int inx;
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+ inx /= 2;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_bitmap);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index f0731a6a5306..830396b1c3b1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -384,7 +384,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
default:
break;
}
- kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
+ kfd_signal_event_interrupt(pasid, sq_int_data, 24);
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index d01bb57733b3..447829c22295 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -97,18 +97,22 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
const uint32_t *cu_mask, uint32_t cu_mask_count,
- uint32_t *se_mask)
+ uint32_t *se_mask, uint32_t inst)
{
struct kfd_cu_info cu_info;
uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
- int i, se, sh, cu, cu_bitmap_sh_mul, inc = wgp_mode_req ? 2 : 1;
+ int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1;
+ uint32_t cu_active_per_node;
+ int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
+ int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;
amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);
- if (cu_mask_count > cu_info.cu_active_number)
- cu_mask_count = cu_info.cu_active_number;
+ cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes;
+ if (cu_mask_count > cu_active_per_node)
+ cu_mask_count = cu_active_per_node;
/* Exceeding these bounds corrupts the stack and indicates a coding error.
* Returning with no CU's enabled will hang the queue, which should be
@@ -141,7 +145,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
for (se = 0; se < cu_info.num_shader_engines; se++)
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
cu_per_sh[se][sh] = hweight32(
- cu_info.cu_bitmap[se % 4][sh + (se / 4) * cu_bitmap_sh_mul]);
+ cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) *
+ cu_bitmap_sh_mul]);
/* Symmetrically map cu_mask to all SEs & SHs:
* se_mask programs up to 2 SH in the upper and lower 16 bits.
@@ -164,20 +169,33 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
* cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
* ...
*
+ * For GFX 9.4.3, the following code only looks at a
+ * subset of the cu_mask corresponding to the inst parameter.
+ * If we have n XCCs under one GPU node
+ * cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0)
+ * cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0)
+ * ..
+ * cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0)
+ * cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0)
+ *
+ * For example, if there are 6 XCCs under 1 KFD node, this code,
+ * running once for each inst, will look at the bits:
+ * inst, inst + 6, inst + 12, ...
+ *
* First ensure all CUs are disabled, then enable user specified CUs.
*/
for (i = 0; i < cu_info.num_shader_engines; i++)
se_mask[i] = 0;
- i = 0;
- for (cu = 0; cu < 16; cu += inc) {
+ i = inst;
+ for (cu = 0; cu < 16; cu += cu_inc) {
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
for (se = 0; se < cu_info.num_shader_engines; se++) {
if (cu_per_sh[se][sh] > cu) {
if (cu_mask[i / 32] & (en_mask << (i % 32)))
se_mask[se] |= en_mask << (cu + sh * 16);
i += inc;
- if (i == cu_mask_count)
+ if (i >= cu_mask_count)
return;
}
}
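To make the striping scheme described in the comment above concrete, here is a minimal standalone sketch (not kernel code) that prints which cu_mask bit indices each XCC instance examines, assuming 6 XCCs under one KFD node, CU mode (cu_inc = 1), and 24 valid mask bits; all of those counts are illustrative assumptions:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_xcc = 6;          /* assumed XCC count under one KFD node */
        unsigned int cu_inc = 1;           /* 1 in CU mode, 2 in WGP mode */
        unsigned int inc = cu_inc * num_xcc;
        unsigned int cu_mask_count = 24;   /* assumed number of valid mask bits */
        unsigned int inst, i;

        for (inst = 0; inst < num_xcc; inst++) {
            printf("inst %u examines bits:", inst);
            /* same stride the kernel loop uses: inst, inst + inc, inst + 2*inc, ... */
            for (i = inst; i < cu_mask_count; i += inc)
                printf(" %u", i);
            printf("\n");
        }
        return 0;
    }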
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 23158db7da03..57bf5e513f4d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -138,7 +138,7 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
const uint32_t *cu_mask, uint32_t cu_mask_count,
- uint32_t *se_mask);
+ uint32_t *se_mask, uint32_t inst);
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index ee1d32d957f2..1a4a69943c71 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
return;
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 83699392c808..8b7fed913526 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
return;
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 2319467d2d95..15277f1d5cf0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -71,7 +71,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
}
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
@@ -321,6 +321,43 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
return 0;
}
+static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
+{
+ struct v11_compute_mqd *m;
+
+ m = get_mqd(mqd);
+
+ memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
+}
+
+static void restore_mqd(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *qp,
+ const void *mqd_src,
+ const void *ctl_stack_src, const u32 ctl_stack_size)
+{
+ uint64_t addr;
+ struct v11_compute_mqd *m;
+
+ m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
+ addr = mqd_mem_obj->gpu_addr;
+
+ memcpy(m, mqd_src, sizeof(*m));
+
+ *mqd = m;
+ if (gart_addr)
+ *gart_addr = addr;
+
+ m->cp_hqd_pq_doorbell_control =
+ qp->doorbell_off <<
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+ pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+ m->cp_hqd_pq_doorbell_control);
+
+ qp->is_active = 0;
+}
+
+
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -457,6 +494,9 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
+ mqd->mqd_stride = kfd_mqd_stride;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -472,6 +512,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = destroy_hiq_mqd;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -500,7 +541,10 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index e23d32f35607..42d881809dc7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -60,7 +60,7 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct mqd_update_info *minfo)
+ struct mqd_update_info *minfo, uint32_t inst)
{
struct v9_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
@@ -69,27 +69,36 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
return;
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);
m = get_mqd(mqd);
+
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
- m->compute_static_thread_mgmt_se4 = se_mask[4];
- m->compute_static_thread_mgmt_se5 = se_mask[5];
- m->compute_static_thread_mgmt_se6 = se_mask[6];
- m->compute_static_thread_mgmt_se7 = se_mask[7];
-
- pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
- m->compute_static_thread_mgmt_se0,
- m->compute_static_thread_mgmt_se1,
- m->compute_static_thread_mgmt_se2,
- m->compute_static_thread_mgmt_se3,
- m->compute_static_thread_mgmt_se4,
- m->compute_static_thread_mgmt_se5,
- m->compute_static_thread_mgmt_se6,
- m->compute_static_thread_mgmt_se7);
+ if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {
+ m->compute_static_thread_mgmt_se4 = se_mask[4];
+ m->compute_static_thread_mgmt_se5 = se_mask[5];
+ m->compute_static_thread_mgmt_se6 = se_mask[6];
+ m->compute_static_thread_mgmt_se7 = se_mask[7];
+
+ pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3,
+ m->compute_static_thread_mgmt_se4,
+ m->compute_static_thread_mgmt_se5,
+ m->compute_static_thread_mgmt_se6,
+ m->compute_static_thread_mgmt_se7);
+ } else {
+ pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n",
+ inst, m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+ }
}
static void set_priority(struct v9_mqd *m, struct queue_properties *q)
@@ -290,7 +299,8 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
- update_cu_mask(mm, mqd, minfo);
+ if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3))
+ update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
@@ -676,6 +686,8 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd + size * xcc);
update_mqd(mm, m, q, minfo);
+ update_cu_mask(mm, mqd, minfo, xcc);
+
if (q->format == KFD_QUEUE_FORMAT_AQL) {
switch (xcc) {
case 0:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 657c37822980..3e1a574d4ea6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -55,7 +55,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
return;
mqd_symmetrically_map_cu_mask(mm,
- minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 8ce6f5200905..1a03173e2313 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -299,8 +299,7 @@ static int pm_set_grace_period_v9(struct packet_manager *pm,
pm->dqm->wait_times,
grace_period,
&reg_offset,
- &reg_data,
- 0);
+ &reg_data);
if (grace_period == USE_DEFAULT_GRACE_PERIOD)
reg_data = pm->dqm->wait_times;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 3d9ce44d88da..fa24e1852493 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1466,8 +1466,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
- return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+ return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 011561605983..bb16b795d1bc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1686,6 +1686,8 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);
+ if (r == -EBUSY)
+ r = -EAGAIN;
goto unreserve_out;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index ff98fded9534..c8c75ff7cea8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -450,8 +450,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, offs, "simd_count",
- dev->gpu ? (dev->node_props.simd_count *
- NUM_XCC(dev->gpu->xcc_mask)) : 0);
+ dev->gpu ? dev->node_props.simd_count : 0);
sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, offs, "caches_count",
@@ -1597,14 +1596,17 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info,
struct kfd_cu_info *cu_info,
- int cache_type, unsigned int cu_processor_id)
+ int cache_type, unsigned int cu_processor_id,
+ struct kfd_node *knode)
{
unsigned int cu_sibling_map_mask;
int first_active_cu;
- int i, j, k;
+ int i, j, k, xcc, start, end;
struct kfd_cache_properties *pcache = NULL;
- cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
+ start = ffs(knode->xcc_mask) - 1;
+ end = start + NUM_XCC(knode->xcc_mask);
+ cu_sibling_map_mask = cu_info->cu_bitmap[start][0][0];
cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask);
@@ -1639,16 +1641,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1);
k = 0;
- for (i = 0; i < cu_info->num_shader_engines; i++) {
- for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
- pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
- pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
- pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
- pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
- k += 4;
-
- cu_sibling_map_mask = cu_info->cu_bitmap[i % 4][j + i / 4];
- cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
+ for (xcc = start; xcc < end; xcc++) {
+ for (i = 0; i < cu_info->num_shader_engines; i++) {
+ for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
+ pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
+ pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
+ pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
+ pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
+ k += 4;
+
+ cu_sibling_map_mask = cu_info->cu_bitmap[xcc][i % 4][j + i / 4];
+ cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
+ }
}
}
pcache->sibling_map_size = k;
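A back-of-the-envelope sketch of why the sibling map grows with XCC count: the loop above emits four sibling_map bytes per (XCC, shader engine, shader array) combination, which is also why CACHE_SIBLINGMAP_SIZE is doubled further down in this diff. The counts below are assumptions chosen for illustration, not values read from hardware:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_xcc = 4;            /* assumed XCCs per KFD node */
        unsigned int num_se = 4;             /* assumed shader engines */
        unsigned int num_sa_per_se = 2;      /* assumed shader arrays per SE */
        unsigned int bytes_per_entry = 4;    /* four sibling_map bytes per step */

        /* 4 * 4 * 2 * 4 = 128 bytes here, versus 32 with a single XCC */
        printf("sibling_map bytes = %u\n",
               num_xcc * num_se * num_sa_per_se * bytes_per_entry);
        return 0;
    }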
@@ -1666,7 +1670,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
{
struct kfd_gpu_cache_info *pcache_info = NULL;
- int i, j, k;
+ int i, j, k, xcc, start, end;
int ct = 0;
unsigned int cu_processor_id;
int ret;
@@ -1700,37 +1704,42 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
* then it will consider only one CU from
* the shared unit
*/
+ start = ffs(kdev->xcc_mask) - 1;
+ end = start + NUM_XCC(kdev->xcc_mask);
+
for (ct = 0; ct < num_of_cache_types; ct++) {
cu_processor_id = gpu_processor_id;
if (pcache_info[ct].cache_level == 1) {
- for (i = 0; i < pcu_info->num_shader_engines; i++) {
- for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
- for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
+ for (xcc = start; xcc < end; xcc++) {
+ for (i = 0; i < pcu_info->num_shader_engines; i++) {
+ for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
+ for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
- ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
- pcu_info->cu_bitmap[i % 4][j + i / 4], ct,
+ ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
+ pcu_info->cu_bitmap[xcc][i % 4][j + i / 4], ct,
cu_processor_id, k);
- if (ret < 0)
- break;
+ if (ret < 0)
+ break;
- if (!ret) {
- num_of_entries++;
- list_add_tail(&props_ext->list, &dev->cache_props);
- }
+ if (!ret) {
+ num_of_entries++;
+ list_add_tail(&props_ext->list, &dev->cache_props);
+ }
- /* Move to next CU block */
- num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
- pcu_info->num_cu_per_sh) ?
- pcache_info[ct].num_cu_shared :
- (pcu_info->num_cu_per_sh - k);
- cu_processor_id += num_cu_shared;
+ /* Move to next CU block */
+ num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
+ pcu_info->num_cu_per_sh) ?
+ pcache_info[ct].num_cu_shared :
+ (pcu_info->num_cu_per_sh - k);
+ cu_processor_id += num_cu_shared;
+ }
}
}
}
} else {
ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
- pcu_info, ct, cu_processor_id);
+ pcu_info, ct, cu_processor_id, kdev);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index dea32a9e5506..27386ce9a021 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -89,7 +89,7 @@ struct kfd_mem_properties {
struct attribute attr;
};
-#define CACHE_SIBLINGMAP_SIZE 64
+#define CACHE_SIBLINGMAP_SIZE 128
struct kfd_cache_properties {
struct list_head list;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 268cb99a4c4b..868946dd7ef1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -65,6 +65,7 @@
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -1273,11 +1274,15 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
- page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
- page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
- page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
- page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
- page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
+ page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_base.high_part = upper_32_bits(pt_base);
page_table_base.low_part = lower_32_bits(pt_base);
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
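The hunk above replaces hard-coded shift-and-mask arithmetic with upper_32_bits()/lower_32_bits() of the page-shifted address. A minimal standalone sketch of that split, assuming AMDGPU_GPU_PAGE_SHIFT is 12 and using a hypothetical GART base address:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SHIFT 12   /* assumed value of AMDGPU_GPU_PAGE_SHIFT */

    static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
    static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

    int main(void)
    {
        uint64_t gart_start = 0x0000008000000000ULL;   /* hypothetical GART base */
        uint64_t pfn = gart_start >> GPU_PAGE_SHIFT;   /* page-shifted address */

        printf("high_part = 0x%08" PRIx32 ", low_part = 0x%08" PRIx32 "\n",
               upper_32(pfn), lower_32(pfn));
        return 0;
    }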
@@ -1639,8 +1644,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
- if (init_data.flags.gpu_vm_support)
- init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
+ if (init_data.flags.gpu_vm_support &&
+ (amdgpu_sg_display == 0))
+ init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
@@ -2334,14 +2340,62 @@ static int dm_late_init(void *handle)
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret;
+ u8 guid[16];
+ u64 tmp64;
+
+ mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
+
+ if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ if (memchr_inv(guid, 0, 16) == NULL) {
+ tmp64 = get_jiffies_64();
+ memcpy(&guid[0], &tmp64, sizeof(u64));
+ memcpy(&guid[8], &tmp64, sizeof(u64));
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ }
+
+ memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+ mutex_unlock(&mgr->lock);
+}
+
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct drm_dp_mst_topology_mgr *mgr;
- int ret;
- bool need_hotplug = false;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -2363,18 +2417,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (!dp_is_lttpr_present(aconnector->dc_link))
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
- ret = drm_dp_mst_topology_mgr_resume(mgr, true);
- if (ret < 0) {
- dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
- aconnector->dc_link);
- need_hotplug = true;
- }
+ /* TODO: move resume_mst_branch_status() back into drm mst resume
+ * once topology probing work is pulled out of mst resume into a
+ * second resume step. That second step should be called after the
+ * old state has been restored (i.e. after drm_atomic_helper_resume()).
+ */
+ resume_mst_branch_status(mgr);
}
}
drm_connector_list_iter_end(&iter);
-
- if (need_hotplug)
- drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
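The resume_mst_branch_status() helper added above re-reads the branch device's GUID and, if it comes back all zeros, fabricates one from the jiffies counter. Below is a minimal userspace sketch of that fallback, with time() standing in for get_jiffies_64() and everything else hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    int main(void)
    {
        uint8_t guid[16] = { 0 };        /* pretend the DPCD read returned zeros */
        static const uint8_t zeros[16];

        if (memcmp(guid, zeros, sizeof(guid)) == 0) {
            uint64_t stamp = (uint64_t)time(NULL);   /* stand-in for jiffies */

            /* fill both halves of the 16-byte GUID from the timestamp */
            memcpy(&guid[0], &stamp, sizeof(stamp));
            memcpy(&guid[8], &stamp, sizeof(stamp));
        }

        for (size_t i = 0; i < sizeof(guid); i++)
            printf("%02x", guid[i]);
        printf("\n");
        return 0;
    }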
@@ -2768,7 +2819,8 @@ static int dm_resume(void *handle)
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
- int i, r, j;
+ int i, r, j, ret;
+ bool need_hotplug = false;
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
@@ -2866,7 +2918,7 @@ static int dm_resume(void *handle)
continue;
/*
- * this is the case when traversing through already created
+ * this is the case when traversing through already created end sink
* MST connectors, should be skipped
*/
if (aconnector && aconnector->mst_root)
@@ -2926,6 +2978,27 @@ static int dm_resume(void *handle)
dm->cached_state = NULL;
+ /* Do mst topology probing after resuming cached state */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+ if (ret < 0) {
+ dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ need_hotplug = true;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (need_hotplug)
+ drm_kms_helper_hotplug_event(ddev);
+
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
@@ -4265,6 +4338,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4374,6 +4448,20 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ replay_feature_enabled = true;
+ break;
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -4422,6 +4510,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_dm_update_connector_after_detect(aconnector);
setup_backlight_device(dm, aconnector);
+ /*
+ * Disable psr if replay can be enabled
+ */
+ if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
+ psr_feature_enabled = false;
+
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
@@ -6004,8 +6098,6 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (recalculate_timing)
drm_mode_set_crtcinfo(&saved_mode, 0);
- else
- drm_mode_set_crtcinfo(&mode, 0);
/*
* If scaling is enabled and refresh rate didn't change
@@ -6567,6 +6659,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
+ drm_mode_set_crtcinfo(mode, 0);
+
stream = create_validate_stream_for_sink(aconnector, mode,
to_dm_connector_state(connector->state),
NULL);
@@ -8051,7 +8145,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
- if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
+ acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
&bundle->flip_addrs[planes_count],
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index a2d34be82613..9e4cc5eeda76 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -620,7 +620,7 @@ struct amdgpu_hdmi_vsdb_info {
unsigned int max_refresh_rate_hz;
/**
- * @replay mode: Replay supported
+ * @replay_mode: Replay supported
*/
bool replay_mode;
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 30d4c6fd95f5..97b7a0b8a1c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -29,6 +29,7 @@
#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
@@ -123,7 +124,12 @@ static void vblank_control_worker(struct work_struct *work)
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
- if (vblank_work->enable) {
+ /*
+ * Prioritize replay over PSR
+ */
+ if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
+ amdgpu_dm_replay_enable(vblank_work->stream, false);
+ else if (vblank_work->enable) {
if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
@@ -132,6 +138,7 @@ static void vblank_control_worker(struct work_struct *work)
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
#endif
+ vblank_work->stream->link->panel_config.psr.disallow_replay &&
vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
amdgpu_dm_psr_enable(vblank_work->stream);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 8eeca160d434..cc74dd69acf2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1269,6 +1269,13 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
attributes.rotation_angle = 0;
attributes.attribute_flags.value = 0;
+ /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
+ * legacy gamma setup.
+ */
+ if (crtc_state->cm_is_degamma_srgb &&
+ adev->dm.dc->caps.color.dpp.gamma_corr)
+ attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
+
attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
if (crtc_state->stream) {
@@ -1468,6 +1475,15 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_blend_mode_property(plane, blend_caps);
}
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ drm_plane_create_zpos_immutable_property(plane, 0);
+ } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ unsigned int zpos = 1 + drm_plane_index(plane);
+ drm_plane_create_zpos_property(plane, zpos, 1, 254);
+ } else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ drm_plane_create_zpos_immutable_property(plane, 255);
+ }
+
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
plane_cap &&
(plane_cap->pixel_format_support.nv12 ||
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 69ffd4424dc7..1b8c2aef4633 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -78,3 +78,4 @@ DC_EDID += dc_edid_parser.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
+
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index c435f7632e8e..5ee87965a078 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -157,7 +157,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
int32_t N;
int32_t j;
- if (!pipe_ctx->stream)
+ if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
@@ -188,7 +188,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
int32_t N;
int32_t j;
- if (!pipe_ctx->stream)
+ if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 3e0da873cf4c..1042cf1a3ab0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -32,6 +32,7 @@
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
+#define SMU_REGISTER_WRITE_RETRY_COUNT 5
struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
@@ -132,6 +133,8 @@ static int dcn315_smu_send_msg_with_param(
unsigned int msg_id, unsigned int param)
{
uint32_t result;
+ uint32_t i = 0;
+ uint32_t read_back_data;
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
@@ -148,10 +151,19 @@ static int dcn315_smu_send_msg_with_param(
/* Set the parameter register for the SMU message, unit is Mhz */
REG_WRITE(MP1_SMN_C2PMSG_37, param);
- /* Trigger the message transaction by writing the message ID */
- generic_write_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3, msg_id);
+ for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
+ /* Trigger the message transaction by writing the message ID */
+ generic_write_indirect_reg(CTX,
+ REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+ mmMP1_C2PMSG_3, msg_id);
+ read_back_data = generic_read_indirect_reg(CTX,
+ REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+ mmMP1_C2PMSG_3);
+ if (read_back_data == msg_id)
+ break;
+ udelay(2);
+ smu_print("SMU msg id write fail %x times. \n", i + 1);
+ }
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
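The retry loop added above writes the message ID and reads it back through the indirect register pair until the value sticks. A generic standalone sketch of that write-and-verify pattern, with stand-in accessors in place of the RSMU indirect registers; only the retry count and the 2 us delay mirror the hunk:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WRITE_RETRY_COUNT 5   /* mirrors SMU_REGISTER_WRITE_RETRY_COUNT */

    static uint32_t fake_mailbox;  /* stand-in for the indirect SMU register */

    static void reg_write(uint32_t val)
    {
        fake_mailbox = val;
    }

    static uint32_t reg_read(void)
    {
        return fake_mailbox;
    }

    static bool write_msg_id_verified(uint32_t msg_id)
    {
        uint32_t i;

        for (i = 0; i < WRITE_RETRY_COUNT; i++) {
            reg_write(msg_id);
            if (reg_read() == msg_id)    /* read back and verify the write */
                return true;
            /* the driver waits udelay(2) between attempts */
        }
        return false;
    }

    int main(void)
    {
        printf("write %s\n", write_msg_id_verified(0x42) ? "verified" : "failed");
        return 0;
    }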
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 984b52923534..e9345f6554db 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -355,7 +355,7 @@ static void dcn32_update_clocks_update_dentist(
int32_t N;
int32_t j;
- if (!pipe_ctx->stream)
+ if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
@@ -401,7 +401,7 @@ static void dcn32_update_clocks_update_dentist(
int32_t N;
int32_t j;
- if (!pipe_ctx->stream)
+ if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
continue;
/* Virtual encoders don't have this function */
if (!stream_enc->funcs->get_fifo_cal_average_level)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 566d7045b2de..3a9077b60029 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2073,12 +2073,12 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
}
- /* Check for case where we are going from odm 2:1 to max
- * pipe scenario. For these cases, we will call
- * commit_minimal_transition_state() to exit out of odm 2:1
- * first before processing new streams
+ /* ODM Combine 2:1 power optimization is only applied in the single-stream
+ * scenario; it uses more pipes than needed to reduce power consumption.
+ * We need to switch off this feature to make room for new streams.
*/
- if (stream_count == dc->res_pool->pipe_count) {
+ if (stream_count > dc->current_state->stream_count &&
+ dc->current_state->stream_count == 1) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->next_odm_pipe)
@@ -3501,6 +3501,45 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
+static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+{
+/*
+ * This function calls HWSS to wait for any potentially double buffered
+ * operations to complete. It should be invoked as a preamble to full
+ * update programming, before asserting any HW locks.
+ */
+ int pipe_idx;
+ int opp_inst;
+ int opp_count = dc->res_pool->pipe_count;
+ struct hubp *hubp;
+ int mpcc_inst;
+ const struct pipe_ctx *pipe_ctx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+
+ hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+
+ mpcc_inst = hubp->inst;
+ // MPCC inst is equal to pipe index in practice
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
+ }
+ }
+}
+
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -3519,24 +3558,9 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
-
-
dc_z10_restore(dc);
-
- if (update_type == UPDATE_TYPE_FULL) {
- /* wait for all double-buffer activity to clear on all pipes */
- int pipe_idx;
-
- for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
-
- if (!pipe_ctx->stream)
- continue;
-
- if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
- pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
- }
- }
+ if (update_type == UPDATE_TYPE_FULL)
+ wait_for_outstanding_hw_updates(dc, context);
if (update_type == UPDATE_TYPE_FULL) {
dc_allow_idle_optimizations(dc, false);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 30c0644d4418..be5a6d008b29 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -169,11 +169,23 @@ static void add_link_enc_assignment(
/* Return first available DIG link encoder. */
static enum engine_id find_first_avail_link_enc(
const struct dc_context *ctx,
- const struct dc_state *state)
+ const struct dc_state *state,
+ enum engine_id eng_id_requested)
{
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
int i;
+ if (eng_id_requested != ENGINE_ID_UNKNOWN) {
+
+ for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
+ if (eng_id == eng_id_requested)
+ return eng_id;
+ }
+ }
+
+ eng_id = ENGINE_ID_UNKNOWN;
+
for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
if (eng_id != ENGINE_ID_UNKNOWN)
@@ -287,7 +299,7 @@ void link_enc_cfg_link_encs_assign(
struct dc_stream_state *streams[],
uint8_t stream_count)
{
- enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+ enum engine_id eng_id = ENGINE_ID_UNKNOWN, eng_id_req = ENGINE_ID_UNKNOWN;
int i;
int j;
@@ -377,8 +389,14 @@ void link_enc_cfg_link_encs_assign(
* assigned to that endpoint.
*/
link_enc = get_link_enc_used_by_link(state, stream->link);
- if (link_enc == NULL)
- eng_id = find_first_avail_link_enc(stream->ctx, state);
+ if (link_enc == NULL) {
+
+ if (stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ stream->link->dpia_preferred_eng_id != ENGINE_ID_UNKNOWN)
+ eng_id_req = stream->link->dpia_preferred_eng_id;
+
+ eng_id = find_first_avail_link_enc(stream->ctx, state, eng_id_req);
+ }
else
eng_id = link_enc->preferred_engine;
@@ -402,7 +420,9 @@ void link_enc_cfg_link_encs_assign(
DC_LOG_DEBUG("%s: CUR %s(%d) - enc_id(%d)\n",
__func__,
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
- assignment.ep_id.link_id.enum_id - 1,
+ assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
+ assignment.ep_id.link_id.enum_id :
+ assignment.ep_id.link_id.enum_id - 1,
assignment.eng_id);
}
for (i = 0; i < MAX_PIPES; i++) {
@@ -413,7 +433,9 @@ void link_enc_cfg_link_encs_assign(
DC_LOG_DEBUG("%s: NEW %s(%d) - enc_id(%d)\n",
__func__,
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
- assignment.ep_id.link_id.enum_id - 1,
+ assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
+ assignment.ep_id.link_id.enum_id :
+ assignment.ep_id.link_id.enum_id - 1,
assignment.eng_id);
}
@@ -478,7 +500,6 @@ struct dc_link *link_enc_cfg_get_link_using_link_enc(
if (stream)
link = stream->link;
- // dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
return link;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 0d0bef8eb331..31e3183497a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1496,6 +1496,7 @@ struct dc_link {
* object creation.
*/
enum engine_id eng_id;
+ enum engine_id dpia_preferred_eng_id;
bool test_pattern_enabled;
enum dp_test_pattern current_test_pattern;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index ad967b58d7be..2a6157555fd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -964,7 +964,9 @@ void dce110_edp_backlight_control(
return;
}
- if (link->panel_cntl) {
+ if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl);
if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) {
@@ -1176,12 +1178,15 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dto_params.otg_inst = tg->inst;
dto_params.timing = &pipe_ctx->stream->timing;
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->disable_symclk_se)
+ if (dccg) {
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+ }
+ } else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
+ }
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
@@ -2656,11 +2661,11 @@ void dce110_prepare_bandwidth(
struct clk_mgr *dccg = dc->clk_mgr;
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
- dccg->funcs->update_clocks(
- dccg,
- context,
- false);
+ if (dccg)
+ dccg->funcs->update_clocks(
+ dccg,
+ context,
+ false);
}
void dce110_optimize_bandwidth(
@@ -2671,10 +2676,11 @@ void dce110_optimize_bandwidth(
dce110_set_displaymarks(dc, context);
- dccg->funcs->update_clocks(
- dccg,
- context,
- true);
+ if (dccg)
+ dccg->funcs->update_clocks(
+ dccg,
+ context,
+ true);
}
static void dce110_program_front_end_for_pipe(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 65fa9e21ad9c..aeadc587433f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1106,29 +1106,6 @@ void dcn20_blank_pixel_data(
v_active,
offset);
- if (!blank && dc->debug.enable_single_display_2to1_odm_policy) {
- /* when exiting dynamic ODM need to reinit DPG state for unused pipes */
- struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe;
-
- odm_pipe = pipe_ctx->next_odm_pipe;
-
- while (old_odm_pipe) {
- if (!odm_pipe || old_odm_pipe->pipe_idx != odm_pipe->pipe_idx)
- dc->hwss.set_disp_pattern_generator(dc,
- old_odm_pipe,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- COLOR_DEPTH_888,
- NULL,
- 0,
- 0,
- 0);
- old_odm_pipe = old_odm_pipe->next_odm_pipe;
- if (odm_pipe)
- odm_pipe = odm_pipe->next_odm_pipe;
- }
- }
-
if (!blank)
if (stream_res->abm) {
dc->hwss.set_pipe(pipe_ctx);
@@ -1584,17 +1561,6 @@ static void dcn20_update_dchubp_dpp(
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
// MPCC inst is equal to pipe index in practice
- int mpcc_inst = hubp->inst;
- int opp_inst;
- int opp_count = dc->res_pool->pipe_count;
-
- for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
- if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
- dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
- dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
- break;
- }
- }
hws->funcs.update_mpcc(dc, pipe_ctx);
}
@@ -1722,11 +1688,16 @@ static void dcn20_program_pipe(
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
- /* Only need to unblank on top pipe */
- if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
- && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
- hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx,
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
+ }
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
@@ -2721,8 +2692,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct dce_hwseq *hws = dc->hwseq;
unsigned int k1_div = PIXEL_RATE_DIV_NA;
unsigned int k2_div = PIXEL_RATE_DIV_NA;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
- struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
@@ -2742,10 +2711,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->enable_symclk_se)
- dccg->funcs->enable_symclk_se(dccg,
- stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A);
-
+ } else {
+ }
if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 6cef62d7a2e5..255713ec29bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -987,3 +987,20 @@ void dcn30_prepare_bandwidth(struct dc *dc,
}
}
+void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
+{
+ unsigned int i;
+ unsigned int triggers = 0;
+
+ if (params->triggers.surface_update)
+ triggers |= 0x100;
+ if (params->triggers.cursor_update)
+ triggers |= 0x8;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
+}
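dcn30_set_static_screen_control() above folds three trigger flags into a single mask word before handing it to each timing generator. A minimal standalone sketch of that packing, using a stand-in struct in place of dc_static_screen_params; the bit positions mirror the hunk:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the triggers member of dc_static_screen_params. */
    struct triggers {
        bool surface_update;
        bool cursor_update;
        bool force_trigger;
    };

    static unsigned int pack_triggers(const struct triggers *t)
    {
        unsigned int mask = 0;

        if (t->surface_update)
            mask |= 0x100;
        if (t->cursor_update)
            mask |= 0x8;
        if (t->force_trigger)
            mask |= 0x1;
        return mask;
    }

    int main(void)
    {
        struct triggers t = { .surface_update = true, .force_trigger = true };

        printf("trigger mask = 0x%x\n", pack_triggers(&t));   /* prints 0x101 */
        return 0;
    }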
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
index a24a8e33a3d2..ce19c54097f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -87,5 +87,7 @@ void dcn30_set_hubp_blank(const struct dc *dc,
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 3d19acaa12f3..0de8b2783cf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 257df8660b4c..61205cdbe2d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index fc25cc300a17..1d7bc1e39afe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index ca8fe55c33b8..4ef85c3a0688 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 1c1fb2fa0822..004beed9bd44 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -1032,6 +1032,28 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
+/* ========================================================== */
+
+/*
+ * DPIA index | Preferred Encoder | Host Router
+ * 0 | C | 0
+ * 1 | First Available | 0
+ * 2 | D | 1
+ * 3 | First Available | 1
+ */
+/* ========================================================== */
+static const enum engine_id dpia_to_preferred_enc_id_table[] = {
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGD,
+ ENGINE_ID_DIGD
+};
+
+static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index)
+{
+ return dpia_to_preferred_enc_id_table[dpia_index];
+}
+
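dcn314_get_preferred_eng_id_dpia() above is a direct table lookup keyed by DPIA index. Here is a standalone sketch of the same lookup with an explicit bounds check added purely for illustration; the enum values are stand-ins, not the driver's ENGINE_ID_* constants:

    #include <stdio.h>

    /* Stand-in values; the driver uses ENGINE_ID_DIGC/ENGINE_ID_DIGD. */
    enum fake_engine_id {
        ENG_UNKNOWN = -1,
        ENG_DIGC = 2,
        ENG_DIGD = 3,
    };

    static const enum fake_engine_id dpia_to_enc[] = {
        ENG_DIGC, ENG_DIGC, ENG_DIGD, ENG_DIGD,
    };

    static enum fake_engine_id preferred_eng_for_dpia(unsigned int dpia_index)
    {
        if (dpia_index >= sizeof(dpia_to_enc) / sizeof(dpia_to_enc[0]))
            return ENG_UNKNOWN;   /* bounds check added for illustration */
        return dpia_to_enc[dpia_index];
    }

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < 5; i++)
            printf("DPIA %u -> engine %d\n", i, preferred_eng_for_dpia(i));
        return 0;
    }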
static struct dce_i2c_hw *dcn31_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1785,6 +1807,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
+ .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 777b2fac20c4..c7417147dff1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -65,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 3082da04a63d..1d052f08aff5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -75,7 +75,7 @@ void mpc32_power_on_blnd_lut(
if (power_on) {
REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0);
REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5);
- } else {
+ } else if (!mpc->ctx->dc->debug.disable_mem_low_power) {
ASSERT(false);
/* TODO: change to mpc
* dpp_base->ctx->dc->optimized_required = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 935cd23e6a01..f9d601c8c721 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -2564,18 +2564,128 @@ static int find_optimal_free_pipe_as_secondary_dpp_pipe(
return free_pipe_idx;
}
+static struct pipe_ctx *find_idle_secondary_pipe_check_mpo(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
+{
+ int i;
+ struct pipe_ctx *secondary_pipe = NULL;
+ struct pipe_ctx *next_odm_mpo_pipe = NULL;
+ int primary_index, preferred_pipe_idx;
+ struct pipe_ctx *old_primary_pipe = NULL;
+
+ /*
+ * Modified from find_idle_secondary_pipe
+ * With windowed MPO and ODM, we want to avoid the case where we want a
+ * free pipe for the left side but the free pipe is being used on the
+ * right side.
+ * Add check on current_state if the primary_pipe is the left side,
+ * to check the right side ( primary_pipe->next_odm_pipe ) to see if
+ * it is using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe )
+ * - If so, then don't use this pipe
+ * EXCEPTION - 3 plane ( 2 MPO plane ) case
+ * - in this case, the primary pipe has already gotten a free pipe for the
+ * MPO window in the left
+ * - when it tries to get a free pipe for the MPO window on the right,
+ * it will see that it is already assigned to the right side
+ * ( primary_pipe->next_odm_pipe ). But in this case, we want this
+ * free pipe, since it will be for the right side. So add an
+ * additional condition, that skipping the free pipe on the right only
+ * applies if the primary pipe has no bottom pipe currently assigned
+ */
+ if (primary_pipe) {
+ primary_index = primary_pipe->pipe_idx;
+ old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index];
+ if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe)
+ && (!primary_pipe->bottom_pipe))
+ next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe;
+
+ preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
+ if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) &&
+ !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+ }
+
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (!secondary_pipe)
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if ((res_ctx->pipe_ctx[i].stream == NULL) &&
+ !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+ return secondary_pipe;
+}
+
+static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+ struct dc_state *state,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream,
+ const struct pipe_ctx *head_pipe)
+{
+ struct resource_context *res_ctx = &state->res_ctx;
+ struct pipe_ctx *idle_pipe, *pipe;
+ struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
+ int head_index;
+
+ if (!head_pipe)
+ ASSERT(0);
+
+ /*
+ * Modified from dcn20_acquire_idle_pipe_for_layer
+ * Check if head_pipe in old_context already has bottom_pipe allocated.
+ * - If so, check if that pipe is available in the current context.
+ * -- If so, reuse pipe from old_context
+ */
+ head_index = head_pipe->pipe_idx;
+ pipe = &old_ctx->pipe_ctx[head_index];
+ if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) {
+ idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx];
+ idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx;
+ } else {
+ idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe);
+ if (!idle_pipe)
+ return NULL;
+ }
+
+ idle_pipe->stream = head_pipe->stream;
+ idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+
+ idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
+
+ return idle_pipe;
+}
+
struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *opp_head_pipe)
{
- int free_pipe_idx =
- find_optimal_free_pipe_as_secondary_dpp_pipe(
- &cur_ctx->res_ctx, &new_ctx->res_ctx,
- pool, opp_head_pipe);
+
+ int free_pipe_idx;
struct pipe_ctx *free_pipe;
+ if (!opp_head_pipe->stream->ctx->dc->config.enable_windowed_mpo_odm)
+ return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+ new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);
+
+ free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx,
+ pool, opp_head_pipe);
if (free_pipe_idx >= 0) {
free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx];
free_pipe->pipe_idx = free_pipe_idx;
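The preferred-pipe heuristic in find_idle_secondary_pipe_check_mpo() above mirrors the primary pipe index across the pool and only then falls back to a backwards scan. A minimal standalone sketch of that selection order (userspace C, not kernel code; the function name and the is_free[] array are illustrative only):

#include <stdbool.h>
#include <stdio.h>

static int pick_secondary_pipe(const bool *is_free, int pipe_count, int primary_idx)
{
	/* first candidate is the pipe "opposite" the primary */
	int preferred = (pipe_count - 1) - primary_idx;
	int i;

	if (is_free[preferred])
		return preferred;

	/* fall back to a backwards scan, as the driver does, to keep
	 * pipe assignment consistent */
	for (i = pipe_count - 1; i >= 0; i--)
		if (is_free[i])
			return i;

	return -1;
}

int main(void)
{
	/* pipes 0 and 5 busy; primary is pipe 0 */
	bool is_free[6] = { false, true, true, true, true, false };

	/* the mirror of pipe 0 is pipe 5 (busy), so the scan picks pipe 4 */
	printf("secondary pipe: %d\n", pick_secondary_pipe(is_free, 6, 0));
	return 0;
}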
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 8afda5ecc0cd..5805fb02af14 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1099,6 +1099,11 @@ void dcn20_calculate_dlg_params(struct dc *dc,
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+ if (dc->ctx->dce_version < DCN_VERSION_3_1 &&
+ context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+ dcn20_adjust_freesync_v_startup(
+ &context->res_ctx.pipe_ctx[i].stream->timing,
+ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
pipe_idx++;
}
@@ -1927,7 +1932,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1951,15 +1955,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
- dcn20_adjust_freesync_v_startup(
- &context->res_ctx.pipe_ctx[i].stream->timing,
- &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
- }
-
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
@@ -2232,7 +2227,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2261,15 +2255,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
- dcn20_adjust_freesync_v_startup(
- &context->res_ctx.pipe_ctx[i].stream->timing,
- &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
- }
-
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 07adb614366e..fb21572750e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -293,6 +293,17 @@ static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_
return num_lines;
}
+static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
+{
+ unsigned int v_active = 0, v_blank = 0, v_back_porch = 0;
+
+ v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom;
+ v_blank = timing->v_total - v_active;
+ v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width;
+
+ return v_back_porch;
+}
+
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
@@ -310,6 +321,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
unsigned int num_lines = 0;
+ unsigned int v_back_porch = 0;
if (!res_ctx->pipe_ctx[i].stream)
continue;
@@ -323,9 +335,16 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
else
pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+ v_back_porch = get_vertical_back_porch(timing);
+
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
- pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
+ // vblank_nom should not be smaller than VSync + VBackPorch + 2, i.e.
+ // (timing->v_sync_width + v_back_porch + 2). The + 2 is because:
+ // 1 -> VStartup_start should be 1 line before VSync
+ // 1 -> always reserve 1 line between the start of vblank and the vstartup signal
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2);
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
if (pipe->plane_state &&
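The new lower bound on vblank_nom is simply VSync + VBackPorch + 2 lines. A minimal standalone sketch that works the numbers through, using illustrative 1920x1080 vertical timing values (v_total 1125, front porch 4, sync width 5) rather than anything taken from the patch:

#include <stdio.h>

struct vtiming {
	unsigned int v_total, v_addressable;
	unsigned int v_border_top, v_border_bottom;
	unsigned int v_front_porch, v_sync_width;
};

static unsigned int vertical_back_porch(const struct vtiming *t)
{
	unsigned int v_active = t->v_border_top + t->v_addressable + t->v_border_bottom;
	unsigned int v_blank = t->v_total - v_active;

	return v_blank - t->v_front_porch - t->v_sync_width;
}

int main(void)
{
	/* illustrative 1920x1080 vertical timing */
	struct vtiming t = {
		.v_total = 1125, .v_addressable = 1080,
		.v_front_porch = 4, .v_sync_width = 5,
	};
	unsigned int vbp = vertical_back_porch(&t);

	/* + 2: one line for VStartup before VSync, one reserved line after
	 * the start of vblank; here that gives 5 + 36 + 2 = 43 */
	printf("v_back_porch = %u, vblank_nom floor = %u\n",
	       vbp, t.v_sync_width + vbp + 2);
	return 0;
}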
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 027aec70c070..eaad1260bfd1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -65,6 +65,7 @@ struct resource_context;
struct clk_bw_params;
struct resource_funcs {
+ enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
void (*destroy)(struct resource_pool **pool);
void (*link_init)(struct dc_link *link);
struct panel_cntl*(*panel_cntl_create)(
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 195ca9e52eda..0895742a3102 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -791,6 +791,10 @@ static bool construct_dpia(struct dc_link *link,
/* Set dpia port index : 0 to number of dpia ports */
link->ddc_hw_inst = init_params->connector_index;
+ // Assign Dpia preferred eng_id
+ if (link->dc->res_pool->funcs->get_preferred_eng_id_dpia)
+ link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst);
+
/* TODO: Create link encoder */
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index dbd60811f95d..ef3a67409021 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
* - Delta for CEIL: delta_from_mid_point_in_us_1
* - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
- if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+ if (mid_point_frames_ceil &&
+ (last_render_time_in_us / mid_point_frames_ceil) <
+ in_out_vrr->min_duration_in_us) {
/* Check for out of range.
* If using CEIL produces a value that is out of range,
* then we are forced to use FLOOR.
@@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
*/
- if (last_render_time_in_us / frames_to_insert <
- in_out_vrr->min_duration_in_us){
+ if (frames_to_insert &&
+ (last_render_time_in_us / frames_to_insert) <
+ in_out_vrr->min_duration_in_us){
frames_to_insert -= (frames_to_insert > 1) ?
1 : 0;
}
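The added mid_point_frames_ceil / frames_to_insert guards only change behaviour when the computed frame count rounds down to zero, which previously caused a divide-by-zero. A minimal sketch of the guarded comparison, with illustrative microsecond values:

#include <stdio.h>

/* returns non-zero only when frames is non-zero and the resulting frame
 * duration would fall below the panel's minimum */
static int below_min_duration(unsigned int last_render_us, unsigned int frames,
			      unsigned int min_duration_us)
{
	return frames && (last_render_us / frames) < min_duration_us;
}

int main(void)
{
	/* frames == 0 no longer divides; it simply reports "not below range" */
	printf("%d %d\n",
	       below_min_duration(20000, 0, 8000),
	       below_min_duration(20000, 3, 8000));
	return 0;
}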
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index abe829bbd54a..67d7b7ee8a2a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -240,6 +240,7 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+ DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
enum DC_DEBUG_MASK {
@@ -250,6 +251,7 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PSR = 0x10,
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
DC_DISABLE_MPO = 0x40,
+ DC_DISABLE_REPLAY = 0x50,
DC_ENABLE_DPIA_TRACE = 0x80,
};
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index e68c1e280322..fa7d6ced786f 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -3117,6 +3117,24 @@ enum atom_umc_config1_def {
UMC_CONFIG1__ENABLE_ECC_CAPABLE = 0x00010000,
};
+struct atom_umc_info_v4_0 {
+ struct atom_common_table_header table_header;
+ uint32_t ucode_reserved[5];
+ uint8_t umcip_min_ver;
+ uint8_t umcip_max_ver;
+ uint8_t vram_type;
+ uint8_t umc_config;
+ uint32_t mem_refclk_10khz;
+ uint32_t clk_reserved[4];
+ uint32_t golden_reserved;
+ uint32_t umc_config1;
+ uint32_t reserved[2];
+ uint8_t channel_num;
+ uint8_t channel_width;
+ uint8_t channel_reserve[2];
+ uint8_t umc_info_reserved[16];
+};
+
/*
***************************************************************************
Data Table vram_info structure
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index f43e29722ef7..7a9d473d0917 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -30,7 +30,7 @@
#define GC_TABLE_ID 0x4347
#define HARVEST_TABLE_SIGNATURE 0x56524148
#define VCN_INFO_TABLE_ID 0x004E4356
-#define MALL_INFO_TABLE_ID 0x4D414C4C
+#define MALL_INFO_TABLE_ID 0x4C4C414D
typedef enum
{
@@ -280,6 +280,36 @@ struct gc_info_v2_0 {
uint32_t gc_num_packer_per_sc;
};
+struct gc_info_v2_1 {
+ struct gpu_info_header header;
+
+ uint32_t gc_num_se;
+ uint32_t gc_num_cu_per_sh;
+ uint32_t gc_num_sh_per_se;
+ uint32_t gc_num_rb_per_se;
+ uint32_t gc_num_tccs;
+ uint32_t gc_num_gprs;
+ uint32_t gc_num_max_gs_thds;
+ uint32_t gc_gs_table_depth;
+ uint32_t gc_gsprim_buff_depth;
+ uint32_t gc_parameter_cache_depth;
+ uint32_t gc_double_offchip_lds_buffer;
+ uint32_t gc_wave_size;
+ uint32_t gc_max_waves_per_simd;
+ uint32_t gc_max_scratch_slots_per_cu;
+ uint32_t gc_lds_size;
+ uint32_t gc_num_sc_per_se;
+ uint32_t gc_num_packer_per_sc;
+ /* new for v2_1 */
+ uint32_t gc_num_tcp_per_sh;
+ uint32_t gc_tcp_size_per_cu;
+ uint32_t gc_num_sdp_interface;
+ uint32_t gc_num_cu_per_sqc;
+ uint32_t gc_instruction_cache_size_per_sqc;
+ uint32_t gc_scalar_data_cache_size_per_sqc;
+ uint32_t gc_tcc_size;
+};
+
typedef struct harvest_info_header {
uint32_t signature; /* Table Signature */
uint32_t version; /* Table Version */
@@ -312,6 +342,12 @@ struct mall_info_v1_0 {
uint32_t reserved[5];
};
+struct mall_info_v2_0 {
+ struct mall_info_header header;
+ uint32_t mall_size_per_umc;
+ uint32_t reserved[8];
+};
+
#define VCN_INFO_TABLE_MAX_NUM_INSTANCES 4
struct vcn_info_header {
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 8433f99f6667..3b5a56585c4b 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -31,12 +31,12 @@
#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>
+#include "amdgpu_irq.h"
+#include "amdgpu_gfx.h"
struct pci_dev;
struct amdgpu_device;
-#define KGD_MAX_QUEUES 128
-
struct kfd_dev;
struct kgd_mem;
@@ -68,7 +68,7 @@ struct kfd_cu_info {
uint32_t wave_front_size;
uint32_t max_scratch_slots_per_cu;
uint32_t lds_size;
- uint32_t cu_bitmap[4][4];
+ uint32_t cu_bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
};
/* For getting GPU local memory information from KGD */
@@ -326,8 +326,7 @@ struct kfd2kgd_calls {
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
- uint32_t *reg_data,
- uint32_t inst);
+ uint32_t *reg_data);
void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
void (*program_trap_handler_settings)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5b1d73b00ef7..8bb2da13826f 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2040,6 +2040,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -3311,8 +3312,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
(gc_ver != IP_VERSION(9, 4, 3)) &&
(attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
+ attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
return 0;
/* hotspot temperature for gc 9,4,3*/
@@ -3324,9 +3327,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only SOC15 dGPUs support hotspot and mem temperatures */
if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
(gc_ver == IP_VERSION(9, 4, 3))) &&
- (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
+ (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
@@ -3471,6 +3472,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
+ size = sizeof(uint32_t);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
+ seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff);
size = sizeof(value);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 95eb8a5eb54f..5a52098bcf16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1031,10 +1031,7 @@ struct pptable_funcs {
enum smu_feature_mask mask);
/**
- * @notify_display_change: Enable fast memory clock switching.
- *
- * Allows for fine grained memory clock switching but has more stringent
- * timing requirements.
+ * @notify_display_change: General interface call to let the SMU know about a DC change.
*/
int (*notify_display_change)(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
index 10cff75b44d5..e2ee855c7748 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
@@ -138,7 +138,10 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
-#define PPSMC_Message_Count 0x4D
+
+#define PPSMC_MSG_DALNotPresent 0x4E
+
+#define PPSMC_Message_Count 0x4F
//Debug Dump Message
#define DEBUGSMC_MSG_TestMessage 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 252aef190c5c..9be4051c0865 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x5
+#define SMU_METRICS_TABLE_VERSION 0x7
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -198,7 +198,7 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t SocketThmResidencyAcc;
uint32_t VrThmResidencyAcc;
uint32_t HbmThmResidencyAcc;
- uint32_t spare;
+ uint32_t GfxLockXCDMak;
// New Items at end to maintain driver compatibility
uint32_t GfxclkFrequency[8];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index ae4f44c4b877..70a4a717fd3f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -83,13 +83,27 @@
#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
#define PPSMC_MSG_PrepareForDriverUnload 0x34
-#define PPSMC_Message_Count 0x35
+#define PPSMC_MSG_ReadThrottlerLimit 0x35
+#define PPSMC_MSG_QueryValidMcaCount 0x36
+#define PPSMC_MSG_McaBankDumpDW 0x37
+#define PPSMC_MSG_GetCTFLimit 0x38
+#define PPSMC_Message_Count 0x39
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+//PPSMC Throttling Limit Types for driver msg argument
+#define PPSMC_THROTTLING_LIMIT_TYPE_SOCKET 0x1
+#define PPSMC_THROTTLING_LIMIT_TYPE_HBM 0x2
+
+//CTF/Throttle Limit types
+#define PPSMC_AID_THM_TYPE 0x1
+#define PPSMC_CCD_THM_TYPE 0x2
+#define PPSMC_XCD_THM_TYPE 0x3
+#define PPSMC_HBM_THM_TYPE 0x4
+
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_MSG;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 297b70b9388f..e57265cf637c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -84,6 +84,7 @@
__SMU_DUMMY_MAP(SetTjMax), \
__SMU_DUMMY_MAP(SetFanTemperatureTarget), \
__SMU_DUMMY_MAP(PrepareMp1ForUnload), \
+ __SMU_DUMMY_MAP(GetCTFLimit), \
__SMU_DUMMY_MAP(DramLogSetDramAddrHigh), \
__SMU_DUMMY_MAP(DramLogSetDramAddrLow), \
__SMU_DUMMY_MAP(DramLogSetDramSize), \
@@ -245,7 +246,8 @@
__SMU_DUMMY_MAP(AllowGpo), \
__SMU_DUMMY_MAP(Mode2Reset), \
__SMU_DUMMY_MAP(RequestI2cTransaction), \
- __SMU_DUMMY_MAP(GetMetricsTable),
+ __SMU_DUMMY_MAP(GetMetricsTable), \
+ __SMU_DUMMY_MAP(DALNotPresent),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 4bb289f9b4b8..da2860da6018 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2082,36 +2082,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
- u32 smu_pcie_arg;
+ uint8_t *table_member1, *table_member2;
+ uint32_t min_gen_speed, max_gen_speed;
+ uint32_t min_lane_width, max_lane_width;
+ uint32_t smu_pcie_arg;
int ret, i;
- /* PCIE gen speed and lane width override */
- if (!amdgpu_device_pcie_dynamic_switching_supported()) {
- if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
- pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
+ GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
+ GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
- if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
- pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
+ min_gen_speed = MAX(0, table_member1[0]);
+ max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+ min_gen_speed = min_gen_speed > max_gen_speed ?
+ max_gen_speed : min_gen_speed;
+ min_lane_width = MAX(1, table_member2[0]);
+ max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+ min_lane_width = min_lane_width > max_lane_width ?
+ max_lane_width : min_lane_width;
- /* Force all levels to use the same settings */
- for (i = 0; i < NUM_LINK_LEVELS; i++) {
- pcie_table->pcie_gen[i] = pcie_gen_cap;
- pcie_table->pcie_lane[i] = pcie_width_cap;
- }
+ if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+ pcie_table->pcie_gen[0] = max_gen_speed;
+ pcie_table->pcie_lane[0] = max_lane_width;
} else {
- for (i = 0; i < NUM_LINK_LEVELS; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
- pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
- pcie_table->pcie_lane[i] = pcie_width_cap;
- }
+ pcie_table->pcie_gen[0] = min_gen_speed;
+ pcie_table->pcie_lane[0] = min_lane_width;
}
+ pcie_table->pcie_gen[1] = max_gen_speed;
+ pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16 |
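The reworked PCIe DPM setup above is easier to follow when reduced to its clamping logic. A simplified standalone sketch, assuming two link levels and using stand-in values for the pptable entries and platform caps (it omits the driver's MAX(0, ...)/MAX(1, ...) floors):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* two link levels, as in the driver's pcie_gen[]/pcie_lane[] tables */
static void setup_levels(uint32_t tbl_min, uint32_t tbl_max, uint32_t cap,
			 bool dynamic_switching, uint32_t out[2])
{
	uint32_t lo = tbl_min;
	uint32_t hi = tbl_max < cap ? tbl_max : cap;

	if (lo > hi)
		lo = hi;

	/* without dynamic switching, both levels run at the capped maximum */
	out[0] = dynamic_switching ? lo : hi;
	out[1] = hi;
}

int main(void)
{
	uint32_t gen[2];

	/* pptable allows gen1..gen4, the platform caps the link at gen3 */
	setup_levels(1, 4, 3, true, gen);
	printf("gen levels: %u %u\n", gen[0], gen[1]);
	return 0;
}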
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index f1282fc4b90a..0232adb95df3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -837,12 +837,8 @@ int smu_v13_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
- if (!smu->pm_enabled)
- return ret;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
- smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+ if (!amdgpu_device_has_dc_support(smu->adev))
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 8b7403ba89d7..3903a47669e4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -162,6 +162,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
+ MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -2687,6 +2688,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
.gpo_control = smu_v13_0_gpo_control,
.get_ecc_info = smu_v13_0_0_get_ecc_info,
+ .notify_display_change = smu_v13_0_notify_display_change,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 6ed9cd0a1e4e..de80e191a92c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -132,6 +132,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
+ MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
};
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
@@ -335,7 +336,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
- while (retry--) {
+ while (--retry) {
ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
if (ret)
return ret;
@@ -2081,6 +2082,55 @@ out:
return ret;
}
+static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ struct amdgpu_device *adev = smu->adev;
+ u32 aid_temp, xcd_temp, mem_temp;
+ uint32_t smu_version;
+ u32 ccd_temp = 0;
+ int ret;
+
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
+ if (!range)
+ return -EINVAL;
+
+ /* Check SMU version: the GetCTFLimit message is only supported on SMU version 85.69 or higher */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (smu_version < 0x554500)
+ return 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_AID_THM_TYPE, &aid_temp);
+ if (ret)
+ goto failed;
+
+ if (adev->flags & AMD_IS_APU) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_CCD_THM_TYPE, &ccd_temp);
+ if (ret)
+ goto failed;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_XCD_THM_TYPE, &xcd_temp);
+ if (ret)
+ goto failed;
+
+ range->hotspot_crit_max = max3(aid_temp, xcd_temp, ccd_temp) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_HBM_THM_TYPE, &mem_temp);
+ if (ret)
+ goto failed;
+
+ range->mem_crit_max = mem_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+failed:
+ return ret;
+}
+
static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2108,8 +2158,7 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
- /* TODO: Enable this when FW support is added */
- return false;
+ return true;
}
static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
@@ -2177,6 +2226,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+ .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
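Two details in the thermal-range hunk are worth spelling out: the 0x554500 gate corresponds to firmware 85.69 packed as major/minor/patch bytes, and the CTF limits are scaled by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES into the millidegree units hwmon expects. A standalone sketch with an illustrative packing helper and limit value (the 1000x constant below is an assumption mirroring that macro, and the CTF value is made up):

#include <stdint.h>
#include <stdio.h>

/* assumed to match SMU_TEMPERATURE_UNITS_PER_CENTIGRADES (deg C -> mdeg C) */
#define TEMP_UNITS_PER_CENTIGRADE 1000

static uint32_t pack_smu_version(uint8_t major, uint8_t minor, uint8_t patch)
{
	return ((uint32_t)major << 16) | ((uint32_t)minor << 8) | patch;
}

int main(void)
{
	uint32_t v = pack_smu_version(85, 69, 0);	/* 0x554500 */
	uint32_t ctf_limit_c = 105;			/* illustrative CTF limit, deg C */

	printf("0x%06x supports GetCTFLimit: %s\n",
	       v, v >= 0x554500 ? "yes" : "no");
	printf("hotspot_crit_max = %u mdeg C\n",
	       ctf_limit_c * TEMP_UNITS_PER_CENTIGRADE);
	return 0;
}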
diff --git a/drivers/gpu/drm/ci/arm.config b/drivers/gpu/drm/ci/arm.config
new file mode 100644
index 000000000000..871f4de063ad
--- /dev/null
+++ b/drivers/gpu/drm/ci/arm.config
@@ -0,0 +1,69 @@
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_DEBUG_KERNEL=y
+
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM=y
+CONFIG_ZSMALLOC_STAT=y
+
+# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
+CONFIG_BLK_DEV_INITRD=n
+
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+
+CONFIG_DRM=y
+CONFIG_DRM_ETNAVIV=y
+CONFIG_DRM_ROCKCHIP=y
+CONFIG_DRM_PANFROST=y
+CONFIG_DRM_LIMA=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_PWM_CROS_EC=y
+CONFIG_BACKLIGHT_PWM=y
+
+CONFIG_ROCKCHIP_CDN_DP=n
+
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PHY_ROCKCHIP_DP=y
+CONFIG_DWMAC_ROCKCHIP=y
+
+CONFIG_MFD_RK808=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_RTC_DRV_RK808=y
+CONFIG_COMMON_CLK_RK808=y
+
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR=y
+
+CONFIG_REGULATOR_VCTRL=y
+
+CONFIG_KASAN=n
+CONFIG_KASAN_INLINE=n
+CONFIG_STACKTRACE=n
+
+CONFIG_TMPFS=y
+
+CONFIG_PROVE_LOCKING=n
+CONFIG_DEBUG_LOCKDEP=n
+CONFIG_SOFTLOCKUP_DETECTOR=n
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=n
+
+CONFIG_FW_LOADER_COMPRESS=y
+
+CONFIG_USB_USBNET=y
+CONFIG_NETDEVICES=y
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_SMSC95XX=y
+
+# TK1
+CONFIG_ARM_TEGRA_DEVFREQ=y
+
+# 32-bit build failure
+CONFIG_DRM_MSM=n
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
new file mode 100644
index 000000000000..817e18ddfd4f
--- /dev/null
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -0,0 +1,199 @@
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_DEBUG_KERNEL=y
+
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM=y
+CONFIG_ZSMALLOC_STAT=y
+
+# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
+CONFIG_BLK_DEV_INITRD=n
+
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+
+CONFIG_DRM=y
+CONFIG_DRM_ROCKCHIP=y
+CONFIG_DRM_PANFROST=y
+CONFIG_DRM_LIMA=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_PANEL_EDP=y
+CONFIG_DRM_MSM=y
+CONFIG_DRM_ETNAVIV=y
+CONFIG_DRM_I2C_ADV7511=y
+CONFIG_PWM_CROS_EC=y
+CONFIG_BACKLIGHT_PWM=y
+
+CONFIG_ROCKCHIP_CDN_DP=n
+
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PHY_ROCKCHIP_DP=y
+CONFIG_DWMAC_ROCKCHIP=y
+CONFIG_STMMAC_ETH=y
+CONFIG_TYPEC_FUSB302=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+
+# MSM platform bits
+
+# For CONFIG_QCOM_LMH
+CONFIG_OF=y
+
+CONFIG_ARM_SMMU_QCOM=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_RPMHPD=y
+CONFIG_QCOM_RPMPD=y
+CONFIG_QCOM_OCMEM=y
+CONFIG_SDM_GPUCC_845=y
+CONFIG_SDM_VIDEOCC_845=y
+CONFIG_SDM_DISPCC_845=y
+CONFIG_SDM_LPASSCC_845=y
+CONFIG_SDM_CAMCC_845=y
+CONFIG_RESET_QCOM_PDC=y
+CONFIG_DRM_TI_SN65DSI86=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_PHY_QCOM_QUSB2=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_MSM_GCC_8996=y
+CONFIG_QCOM_CLK_APCC_MSM8996=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_LMH=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_QCOM_WDT=y
+CONFIG_POWER_RESET_QCOM_PON=y
+CONFIG_RTC_DRV_PM8XXX=y
+CONFIG_INTERCONNECT=y
+CONFIG_INTERCONNECT_QCOM=y
+CONFIG_INTERCONNECT_QCOM_MSM8996=y
+CONFIG_INTERCONNECT_QCOM_SDM845=y
+CONFIG_INTERCONNECT_QCOM_MSM8916=y
+CONFIG_INTERCONNECT_QCOM_MSM8996=y
+CONFIG_INTERCONNECT_QCOM_OSM_L3=y
+CONFIG_INTERCONNECT_QCOM_SC7180=y
+CONFIG_INTERCONNECT_QCOM_SM8350=y
+CONFIG_CRYPTO_DEV_QCOM_RNG=y
+CONFIG_SC_DISPCC_7180=y
+CONFIG_SC_GPUCC_7180=y
+CONFIG_SM_GPUCC_8350=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_DRM_PARADE_PS8640=y
+CONFIG_DRM_LONTIUM_LT9611UXC=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_USB_ONBOARD_HUB=y
+CONFIG_NVMEM_QCOM_QFPROM=y
+CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y
+
+
+# db410c ethernet
+CONFIG_USB_RTL8152=y
+# db820c ethernet
+CONFIG_ATL1C=y
+# Chromebooks ethernet
+CONFIG_USB_ONBOARD_HUB=y
+# 888 HDK ethernet
+CONFIG_USB_LAN78XX=y
+
+CONFIG_ARCH_ALPINE=n
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_IPROC=n
+CONFIG_ARCH_BERLIN=n
+CONFIG_ARCH_BRCMSTB=n
+CONFIG_ARCH_EXYNOS=n
+CONFIG_ARCH_K3=n
+CONFIG_ARCH_LAYERSCAPE=n
+CONFIG_ARCH_LG1K=n
+CONFIG_ARCH_HISI=n
+CONFIG_ARCH_MVEBU=n
+CONFIG_ARCH_SEATTLE=n
+CONFIG_ARCH_SYNQUACER=n
+CONFIG_ARCH_RENESAS=n
+CONFIG_ARCH_R8A774A1=n
+CONFIG_ARCH_R8A774C0=n
+CONFIG_ARCH_R8A7795=n
+CONFIG_ARCH_R8A7796=n
+CONFIG_ARCH_R8A77965=n
+CONFIG_ARCH_R8A77970=n
+CONFIG_ARCH_R8A77980=n
+CONFIG_ARCH_R8A77990=n
+CONFIG_ARCH_R8A77995=n
+CONFIG_ARCH_STRATIX10=n
+CONFIG_ARCH_TEGRA=n
+CONFIG_ARCH_SPRD=n
+CONFIG_ARCH_THUNDER=n
+CONFIG_ARCH_THUNDER2=n
+CONFIG_ARCH_UNIPHIER=n
+CONFIG_ARCH_VEXPRESS=n
+CONFIG_ARCH_XGENE=n
+CONFIG_ARCH_ZX=n
+CONFIG_ARCH_ZYNQMP=n
+
+# Strip out some stuff we don't need for graphics testing, to reduce
+# the build.
+CONFIG_CAN=n
+CONFIG_WIRELESS=n
+CONFIG_RFKILL=n
+CONFIG_WLAN=n
+
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR=y
+
+CONFIG_REGULATOR_VCTRL=y
+
+CONFIG_KASAN=n
+CONFIG_KASAN_INLINE=n
+CONFIG_STACKTRACE=n
+
+CONFIG_TMPFS=y
+
+CONFIG_PROVE_LOCKING=n
+CONFIG_DEBUG_LOCKDEP=n
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+
+CONFIG_DETECT_HUNG_TASK=y
+
+CONFIG_FW_LOADER_COMPRESS=y
+CONFIG_FW_LOADER_USER_HELPER=n
+
+CONFIG_USB_USBNET=y
+CONFIG_NETDEVICES=y
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_SMSC95XX=y
+
+# For amlogic
+CONFIG_MESON_GXL_PHY=y
+CONFIG_MDIO_BUS_MUX_MESON_G12A=y
+CONFIG_DRM_MESON=y
+
+# For Mediatek
+CONFIG_DRM_MEDIATEK=y
+CONFIG_PWM_MEDIATEK=y
+CONFIG_DRM_MEDIATEK_HDMI=y
+CONFIG_GNSS=y
+CONFIG_GNSS_MTK_SERIAL=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MTK=y
+CONFIG_MTK_DEVAPC=y
+CONFIG_PWM_MTK_DISP=y
+CONFIG_MTK_CMDQ=y
+
+# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
+CONFIG_ARCH_TEGRA=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_TEGRA=m
+CONFIG_R8169=y
+CONFIG_STAGING=y
+CONFIG_DRM_TEGRA_STAGING=y
+CONFIG_TEGRA_HOST1X=y
+CONFIG_ARM_TEGRA_DEVFREQ=y
+CONFIG_TEGRA_SOCTHERM=y
+CONFIG_DRM_TEGRA_DEBUG=y
+CONFIG_PWM_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build-igt.sh b/drivers/gpu/drm/ci/build-igt.sh
new file mode 100644
index 000000000000..500fa4f5c30a
--- /dev/null
+++ b/drivers/gpu/drm/ci/build-igt.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -ex
+
+git clone https://gitlab.freedesktop.org/drm/igt-gpu-tools.git --single-branch --no-checkout
+cd igt-gpu-tools
+git checkout $IGT_VERSION
+
+if [[ "$KERNEL_ARCH" = "arm" ]]; then
+ . ../.gitlab-ci/container/create-cross-file.sh armhf
+ EXTRA_MESON_ARGS="--cross-file /cross_file-armhf.txt"
+fi
+
+MESON_OPTIONS="-Doverlay=disabled \
+ -Dchamelium=disabled \
+ -Dvalgrind=disabled \
+ -Dman=enabled \
+ -Dtests=enabled \
+ -Drunner=enabled \
+ -Dlibunwind=enabled \
+ -Dprefix=/igt"
+
+mkdir -p /igt
+meson build $MESON_OPTIONS $EXTRA_MESON_ARGS
+ninja -C build -j${FDO_CI_CONCURRENT:-4} || ninja -C build -j 1
+ninja -C build install
+
+mkdir -p artifacts/
+tar -cf artifacts/igt.tar /igt
+
+# Pass needed files to the test stage
+S3_ARTIFACT_NAME="igt.tar.gz"
+gzip -c artifacts/igt.tar > ${S3_ARTIFACT_NAME}
+ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/${S3_ARTIFACT_NAME}
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
new file mode 100644
index 000000000000..7b014287a041
--- /dev/null
+++ b/drivers/gpu/drm/ci/build.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -ex
+
+# Clean up stale rebases that GitLab might not have removed when reusing a checkout dir
+rm -rf .git/rebase-apply
+
+. .gitlab-ci/container/container_pre_build.sh
+
+# libssl-dev was uninstalled because it was considered an ephemeral package
+apt-get update
+apt-get install -y libssl-dev
+
+if [[ "$KERNEL_ARCH" = "arm64" ]]; then
+ GCC_ARCH="aarch64-linux-gnu"
+ DEBIAN_ARCH="arm64"
+ DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
+elif [[ "$KERNEL_ARCH" = "arm" ]]; then
+ GCC_ARCH="arm-linux-gnueabihf"
+ DEBIAN_ARCH="armhf"
+ DEVICE_TREES="arch/arm/boot/dts/rockchip/rk3288-veyron-jaq.dtb"
+ DEVICE_TREES+=" arch/arm/boot/dts/allwinner/sun8i-h3-libretech-all-h3-cc.dtb"
+ DEVICE_TREES+=" arch/arm/boot/dts/nxp/imx/imx6q-cubox-i.dtb"
+ apt-get install -y libssl-dev:armhf
+else
+ GCC_ARCH="x86_64-linux-gnu"
+ DEBIAN_ARCH="x86_64"
+ DEVICE_TREES=""
+fi
+
+export ARCH=${KERNEL_ARCH}
+export CROSS_COMPILE="${GCC_ARCH}-"
+
+# The kernel doesn't like the gold linker (or the old lld in our debians).
+# Sneak in some override symlinks during kernel build until we can update
+# debian.
+mkdir -p ld-links
+for i in /usr/bin/*-ld /usr/bin/ld; do
+ i=$(basename $i)
+ ln -sf /usr/bin/$i.bfd ld-links/$i
+done
+
+NEWPATH=$(pwd)/ld-links
+export PATH=$NEWPATH:$PATH
+
+git config --global user.email "fdo@example.com"
+git config --global user.name "freedesktop.org CI"
+git config --global pull.rebase true
+
+# Try to merge fixes from target repo
+if [ "$(git ls-remote --exit-code --heads ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes)" ]; then
+ git pull ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes
+fi
+
+# Try to merge fixes from local repo if this isn't a merge request
+if [ -z "$CI_MERGE_REQUEST_PROJECT_PATH" ]; then
+ if [ "$(git ls-remote --exit-code --heads origin ${TARGET_BRANCH}-external-fixes)" ]; then
+ git pull origin ${TARGET_BRANCH}-external-fixes
+ fi
+fi
+
+for opt in $ENABLE_KCONFIGS; do
+ echo CONFIG_$opt=y >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config
+done
+for opt in $DISABLE_KCONFIGS; do
+ echo CONFIG_$opt=n >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config
+done
+
+if [[ -n "${MERGE_FRAGMENT}" ]]; then
+ ./scripts/kconfig/merge_config.sh ${DEFCONFIG} drivers/gpu/drm/ci/${MERGE_FRAGMENT}
+else
+ make `basename ${DEFCONFIG}`
+fi
+
+make ${KERNEL_IMAGE_NAME}
+
+mkdir -p /lava-files/
+for image in ${KERNEL_IMAGE_NAME}; do
+ cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
+done
+
+if [[ -n ${DEVICE_TREES} ]]; then
+ make dtbs
+ cp ${DEVICE_TREES} /lava-files/.
+fi
+
+make modules
+mkdir -p install/modules/
+INSTALL_MOD_PATH=install/modules/ make modules_install
+
+if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
+ make Image.lzma
+ mkimage \
+ -f auto \
+ -A arm \
+ -O linux \
+ -d arch/arm64/boot/Image.lzma \
+ -C lzma\
+ -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
+ /lava-files/cheza-kernel
+ KERNEL_IMAGE_NAME+=" cheza-kernel"
+
+ # Make a gzipped copy of the Image for db410c.
+ gzip -k /lava-files/Image
+ KERNEL_IMAGE_NAME+=" Image.gz"
+fi
+
+# Pass needed files to the test stage
+mkdir -p install
+cp -rfv .gitlab-ci/* install/.
+cp -rfv install/common install/ci-common
+cp -rfv drivers/gpu/drm/ci/* install/.
+
+. .gitlab-ci/container/container_post_build.sh
+
+if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
+ xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz
+ FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz"
+
+ if [[ -n $DEVICE_TREES ]]; then
+ FILES_TO_UPLOAD="$FILES_TO_UPLOAD $(basename -a $DEVICE_TREES)"
+ fi
+
+ for f in $FILES_TO_UPLOAD; do
+ ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/$f \
+ https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
+ done
+
+ S3_ARTIFACT_NAME="kernel-files.tar.zst"
+ tar --zstd -cf $S3_ARTIFACT_NAME install
+ ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/${S3_ARTIFACT_NAME}
+
+ echo "Download vmlinux.xz from https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/vmlinux.xz"
+fi
+
+mkdir -p artifacts/install/lib
+mv install/* artifacts/install/.
+rm -rf artifacts/install/modules
+ln -s common artifacts/install/ci-common
+
+for image in ${KERNEL_IMAGE_NAME}; do
+ cp /lava-files/$image artifacts/install/.
+done
+
+tar -C artifacts -cf artifacts/install.tar install
+rm -rf artifacts/install
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
new file mode 100644
index 000000000000..e6503f1c5927
--- /dev/null
+++ b/drivers/gpu/drm/ci/build.yml
@@ -0,0 +1,110 @@
+.build:
+ extends:
+ - .build-rules
+ stage: build
+ artifacts:
+ paths:
+ - artifacts
+ script:
+ - FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build.sh
+
+.build:arm32:
+ extends:
+ - .build
+ - .use-debian/arm64_build
+ tags:
+ - aarch64
+ variables:
+ DEFCONFIG: "arch/arm/configs/multi_v7_defconfig"
+ KERNEL_IMAGE_NAME: "zImage"
+ KERNEL_ARCH: "arm"
+
+.build:arm64:
+ extends:
+ - .build
+ - .use-debian/arm64_build
+ tags:
+ - aarch64
+ variables:
+ DEFCONFIG: "arch/arm64/configs/defconfig"
+ KERNEL_IMAGE_NAME: "Image"
+ KERNEL_ARCH: "arm64"
+
+.build:x86_64:
+ extends:
+ - .build
+ - .use-debian/x86_64_build
+ variables:
+ DEFCONFIG: "arch/x86/configs/x86_64_defconfig"
+ KERNEL_IMAGE_NAME: "bzImage"
+ KERNEL_ARCH: "x86_64"
+
+
+# Build IGT for testing on devices
+
+igt:arm32:
+ extends: .build:arm32
+ script:
+ - FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
+
+igt:arm64:
+ extends: .build:arm64
+ script:
+ - FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
+
+igt:x86_64:
+ extends: .build:x86_64
+ script:
+ - FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash drivers/gpu/drm/ci/build-igt.sh
+
+# Build kernels for testing on devices
+
+testing:arm32:
+ extends: .build:arm32
+ variables:
+ # Would be good to have DEBUG_KMEMLEAK, but it doesn't work well with either
+ # PROVE_LOCKING or KASAN as of 5.17.
+ #
+ # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
+ # becoming too big for their bootloaders.
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ UPLOAD_TO_MINIO: 1
+ MERGE_FRAGMENT: arm.config
+
+testing:arm64:
+ extends: .build:arm64
+ variables:
+ # Would be good to have DEBUG_KMEMLEAK, but it doesn't work well with either
+ # PROVE_LOCKING or KASAN as of 5.17.
+ #
+ # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
+ # becoming too big for their bootloaders.
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ UPLOAD_TO_MINIO: 1
+ MERGE_FRAGMENT: arm64.config
+
+testing:x86_64:
+ extends: .build:x86_64
+ variables:
+ # Would be good to have DEBUG_KMEMLEAK, but it doesn't work well with either
+ # PROVE_LOCKING or KASAN as of 5.17.
+ #
+ # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
+ # becoming too big for their bootloaders.
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ UPLOAD_TO_MINIO: 1
+ MERGE_FRAGMENT: x86_64.config
+
+
+# Jobs for build-testing different configurations
+
+build:arm32:
+ extends: .build:arm32
+
+build-nodebugfs:arm64:
+ extends: .build:arm64
+ variables:
+ DISABLE_KCONFIGS: "DEBUG_FS"
+
+build:x86_64:
+ extends: .build:x86_64
diff --git a/drivers/gpu/drm/ci/check-patch.py b/drivers/gpu/drm/ci/check-patch.py
new file mode 100755
index 000000000000..a5f399a20e25
--- /dev/null
+++ b/drivers/gpu/drm/ci/check-patch.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# check-patch.py: run checkpatch.pl across all commits in a branch
+#
+# Based on qemu/.gitlab-ci.d/check-patch.py
+#
+# Copyright (C) 2020 Red Hat, Inc.
+# Copyright (C) 2022 Collabora Ltd.
+
+import os
+import os.path
+import sys
+import subprocess
+
+repourl = "https://gitlab.freedesktop.org/%s.git" % os.environ["CI_MERGE_REQUEST_PROJECT_PATH"]
+
+# GitLab CI environment does not give us any direct info about the
+# base for the user's branch. We thus need to figure out a common
+# ancestor between the user's branch and current git master.
+os.environ["GIT_DEPTH"] = "1000"
+subprocess.call(["git", "remote", "remove", "check-patch"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+subprocess.check_call(["git", "remote", "add", "check-patch", repourl])
+subprocess.check_call(["git", "fetch", "check-patch", os.environ["CI_MERGE_REQUEST_TARGET_BRANCH_NAME"]],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL)
+
+ancestor = subprocess.check_output(["git", "merge-base",
+ "check-patch/%s" % os.environ["CI_MERGE_REQUEST_TARGET_BRANCH_NAME"], "HEAD"],
+ universal_newlines=True)
+
+ancestor = ancestor.strip()
+
+log = subprocess.check_output(["git", "log", "--format=%H %s",
+ ancestor + "..."],
+ universal_newlines=True)
+
+subprocess.check_call(["git", "remote", "rm", "check-patch"])
+
+if log == "":
+ print("\nNo commits since %s, skipping checks\n" % ancestor)
+ sys.exit(0)
+
+errors = False
+
+print("\nChecking all commits since %s...\n" % ancestor, flush=True)
+
+ret = subprocess.run(["scripts/checkpatch.pl",
+ "--terse",
+ "--types", os.environ["CHECKPATCH_TYPES"],
+ "--git", ancestor + "..."])
+
+if ret.returncode != 0:
+ print(" ❌ FAIL one or more commits failed scripts/checkpatch.pl")
+ sys.exit(1)
+
+sys.exit(0)
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
new file mode 100644
index 000000000000..9764e7921a4f
--- /dev/null
+++ b/drivers/gpu/drm/ci/container.yml
@@ -0,0 +1,65 @@
+.container:
+ variables:
+ CI_REPOSITORY_URL: ${DRM_CI_PROJECT_URL}.git # So ci-templates clones drm-ci instead of the repo to test
+ CI_COMMIT_SHA: ${DRM_CI_COMMIT_SHA}
+
+debian/x86_64_build-base:
+ variables:
+ EXTRA_LOCAL_PACKAGES: "libcairo-dev libdw-dev libjson-c-dev libkmod2 libkmod-dev libpciaccess-dev libproc2-dev libudev-dev libunwind-dev python3-docutils bc python3-ply libssl-dev bc"
+
+debian/x86_64_test-gl:
+ variables:
+ EXTRA_LOCAL_PACKAGES: "jq libasound2 libcairo2 libdw1 libglib2.0-0 libjson-c5 libkmod-dev libkmod2 libgles2 libproc2-dev"
+
+debian/arm64_build:
+ variables:
+ EXTRA_LOCAL_PACKAGES: "libcairo-dev libdw-dev libjson-c-dev libproc2-dev libkmod2 libkmod-dev libpciaccess-dev libudev-dev libunwind-dev python3-docutils libssl-dev crossbuild-essential-armhf libkmod-dev:armhf libproc2-dev:armhf libunwind-dev:armhf libdw-dev:armhf libpixman-1-dev:armhf libcairo-dev:armhf libudev-dev:armhf libjson-c-dev:armhf"
+
+.kernel+rootfs:
+ variables:
+ EXTRA_LOCAL_PACKAGES: "jq libasound2 libcairo2 libdw1 libglib2.0-0 libjson-c5"
+
+# Disable container jobs that we won't use
+alpine/x86_64_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-vk:
+ rules:
+ - when: never
+
+fedora/x86_64_build:
+ rules:
+ - when: never
+
+debian/android_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-android:
+ rules:
+ - when: never
+
+windows_build_vs2019:
+ rules:
+ - when: never
+
+windows_test_vs2019:
+ rules:
+ - when: never
+
+.debian/x86_64_build-mingw:
+ rules:
+ - when: never
+
+rustfmt:
+ rules:
+ - when: never
+
+windows_vs2019:
+ rules:
+ - when: never
+
+clang-format:
+ rules:
+ - when: never
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
new file mode 100644
index 000000000000..2c4df53f5dfe
--- /dev/null
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -0,0 +1,251 @@
+variables:
+ DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 0dc961645c4f0241f8512cb0ec3ad59635842072
+
+ UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
+ TARGET_BRANCH: drm-next
+
+ IGT_VERSION: 471bfababd070e1dac0ebb87470ac4f2ae85e663
+
+ DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git
+ DEQP_RUNNER_GIT_TAG: v0.15.0
+
+ FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs
+ MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
+ DRM_CI_PROJECT_URL: https://gitlab.freedesktop.org/${DRM_CI_PROJECT_PATH}
+ CI_PRE_CLONE_SCRIPT: |-
+ set -o xtrace
+ curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s ${DRM_CI_PROJECT_URL}/-/raw/${DRM_CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh -o download-git-cache.sh
+ bash download-git-cache.sh
+ rm download-git-cache.sh
+ set +o xtrace
+ S3_HOST: s3.freedesktop.org
+ # per-pipeline artifact storage on MinIO
+ PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
+ # per-job artifact storage on MinIO
+ JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
+
+ LAVA_JOB_PRIORITY: 30
+
+default:
+ before_script:
+ - export SCRIPTS_DIR=$(mktemp -d)
+ - curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${DRM_CI_PROJECT_URL}/-/raw/${DRM_CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh"
+ - source ${SCRIPTS_DIR}/setup-test-env.sh
+ - echo -e "\e[0Ksection_start:$(date +%s):unset_env_vars_section[collapsed=true]\r\e[0KUnsetting vulnerable environment variables"
+ - export CI_JOB_JWT_FILE="${CI_JOB_JWT_FILE:-$(mktemp)}"
+ - echo -n "${CI_JOB_JWT}" > "${CI_JOB_JWT_FILE}"
+ - unset CI_JOB_JWT
+ - echo -e "\e[0Ksection_end:$(date +%s):unset_env_vars_section\r\e[0K"
+
+ - echo -e "\e[0Ksection_start:$(date +%s):drm_ci_download_section[collapsed=true]\r\e[0KDownloading mesa from $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz"
+ - cd $CI_PROJECT_DIR
+ - curl --output - $DRM_CI_PROJECT_URL/-/archive/$DRM_CI_COMMIT_SHA/mesa-$DRM_CI_COMMIT_SHA.tar.gz | tar -xz
+ - mv mesa-$DRM_CI_COMMIT_SHA/.gitlab-ci* .
+ - rm -rf mesa-$DRM_CI_COMMIT_SHA/
+ - echo -e "\e[0Ksection_end:$(date +%s):drm_ci_download_section\r\e[0K"
+
+ after_script:
+ - >
+ set +x
+
+ test -e "${CI_JOB_JWT_FILE}" &&
+ export CI_JOB_JWT="$(<${CI_JOB_JWT_FILE})" &&
+ rm "${CI_JOB_JWT_FILE}"
+
+ # Retry when job fails.
+ retry:
+ max: 1
+ # Ignore runner_unsupported, stale_schedule, archived_failure, or
+ # unmet_prerequisites
+ when:
+ - api_failure
+ - runner_system_failure
+ - script_failure
+ - job_execution_timeout
+ - scheduler_failure
+ - data_integrity_failure
+ - unknown_failure
+
+include:
+ - project: 'freedesktop/ci-templates'
+ ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
+ file:
+ - '/templates/ci-fairy.yml'
+ - project: 'freedesktop/ci-templates'
+ ref: *ci-templates-commit
+ file:
+ - '/templates/alpine.yml'
+ - '/templates/debian.yml'
+ - '/templates/fedora.yml'
+ - project: *drm-ci-project-path
+ ref: *drm-ci-commit-sha
+ file:
+ - '/.gitlab-ci/farm-rules.yml'
+ - '/.gitlab-ci/test-source-dep.yml'
+ - '/.gitlab-ci/container/gitlab-ci.yml'
+ - '/.gitlab-ci/test/gitlab-ci.yml'
+ - '/.gitlab-ci/lava/lava-gitlab-ci.yml'
+ - drivers/gpu/drm/ci/image-tags.yml
+ - drivers/gpu/drm/ci/container.yml
+ - drivers/gpu/drm/ci/static-checks.yml
+ - drivers/gpu/drm/ci/build.yml
+ - drivers/gpu/drm/ci/test.yml
+ - 'https://gitlab.freedesktop.org/gfx-ci/lab-status/-/raw/main/lab-status.yml'
+
+
+stages:
+ - sanity
+ - container
+ - git-archive
+ - build
+ - amdgpu
+ - i915
+ - mediatek
+ - meson
+ - msm
+ - rockchip
+ - virtio-gpu
+ - lint
+
+# YAML anchors for rule conditions
+# --------------------------------
+.rules-anchors:
+ rules:
+ # Pipeline for forked project branch
+ - if: &is-forked-branch '$CI_COMMIT_BRANCH && $CI_PROJECT_NAMESPACE != "mesa"'
+ when: manual
+ # Forked project branch / pre-merge pipeline not for Marge bot
+ - if: &is-forked-branch-or-pre-merge-not-for-marge '$CI_PROJECT_NAMESPACE != "mesa" || ($GITLAB_USER_LOGIN != "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event")'
+ when: manual
+ # Pipeline runs for the main branch of the upstream Mesa project
+ - if: &is-mesa-main '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_COMMIT_BRANCH'
+ when: always
+ # Post-merge pipeline
+ - if: &is-post-merge '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
+ when: on_success
+ # Post-merge pipeline, not for Marge Bot
+ - if: &is-post-merge-not-for-marge '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
+ when: on_success
+ # Pre-merge pipeline
+ - if: &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"'
+ when: on_success
+ # Pre-merge pipeline for Marge Bot
+ - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
+ when: on_success
+
+# Rule to filter for only scheduled pipelines.
+.scheduled_pipeline-rules:
+ rules:
+ - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
+ when: on_success
+
+# Generic rule to not run the job during scheduled pipelines. Jobs that aren't
+# something like a nightly run should include this rule.
+.no_scheduled_pipelines-rules:
+ rules:
+ - if: *is-scheduled-pipeline
+ when: never
+
+# When to automatically run the CI for build jobs
+.build-rules:
+ rules:
+ - !reference [.no_scheduled_pipelines-rules, rules]
+ # Run automatically once all dependency jobs have passed
+ - when: on_success
+
+
+.ci-deqp-artifacts:
+ artifacts:
+ name: "mesa_${CI_JOB_NAME}"
+ when: always
+ untracked: false
+ paths:
+ # Watch out! Artifacts are relative to the build dir.
+ # https://gitlab.com/gitlab-org/gitlab-ce/commit/8788fb925706cad594adf6917a6c5f6587dd1521
+ - artifacts
+ - _build/meson-logs/*.txt
+ - _build/meson-logs/strace
+
+
+.container-rules:
+ rules:
+ - !reference [.no_scheduled_pipelines-rules, rules]
+ # Run pipeline by default in the main project if any CI pipeline
+ # configuration files were changed, to ensure docker images are up to date
+ - if: *is-post-merge
+ changes:
+ - drivers/gpu/drm/ci/**/*
+ when: on_success
+ # Run pipeline by default if it was triggered by Marge Bot, is for a
+ # merge request, and any files affecting the pipeline were changed
+ - if: *is-pre-merge-for-marge
+ when: on_success
+ # Run pipeline by default in the main project if it was not triggered by
+ # Marge Bot, and any files affecting the pipeline were changed
+ - if: *is-post-merge-not-for-marge
+ when: on_success
+ # Allow triggering jobs manually in other cases
+ - when: manual
+
+
+
+# Git archive
+
+make git archive:
+ extends:
+ - .fdo.ci-fairy
+ stage: git-archive
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ # ensure we are running on packet
+ tags:
+ - packet.net
+ script:
+ # Remove drm-ci files we just added
+ - rm -rf .gitlab-ci.*
+
+ # Compactify the .git directory
+ - git gc --aggressive
+ # compress the current folder
+ - tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
+
+ # login with the JWT token file
+ - ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
+
+
+# Sanity checks of MR settings and commit logs
+sanity:
+ extends:
+ - .fdo.ci-fairy
+ stage: sanity
+ rules:
+ - if: *is-pre-merge
+ when: on_success
+ # Other cases default to never
+ variables:
+ GIT_STRATEGY: none
+ script:
+ # ci-fairy check-commits --junit-xml=check-commits.xml
+ - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+ artifacts:
+ when: on_failure
+ reports:
+ junit: check-*.xml
+
+# Rules for tests that should not block merging, but should be available to
+# optionally run with the "play" button in the UI in pre-merge non-marge
+# pipelines. This should appear in "extends:" after any includes of
+# test-source-dep.yml rules, so that these rules replace those.
+.test-manual-mr:
+ rules:
+ - !reference [.no_scheduled_pipelines-rules, rules]
+ - if: *is-forked-branch-or-pre-merge-not-for-marge
+ when: manual
+ variables:
+ JOB_TIMEOUT: 80
+
+
+# Jobs that need to pass before spending hardware resources on further testing
+.required-for-hardware-jobs:
+ needs: []
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
new file mode 100755
index 000000000000..2bb759165063
--- /dev/null
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -0,0 +1,77 @@
+#!/bin/sh
+# SPDX-License-Identifier: MIT
+
+set -ex
+
+export IGT_FORCE_DRIVER=${DRIVER_NAME}
+export PATH=$PATH:/igt/bin/
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/igt/lib/aarch64-linux-gnu/:/igt/lib/x86_64-linux-gnu:/igt/lib:/igt/lib64
+
+# Uncomment the below to debug problems with driver probing
+: '
+ls -l /dev/dri/
+cat /sys/kernel/debug/devices_deferred
+cat /sys/kernel/debug/device_component/*
+'
+
+# Dump drm state to confirm that kernel was able to find a connected display:
+# TODO this path might not exist for all drivers.. maybe run modetest instead?
+set +e
+cat /sys/kernel/debug/dri/*/state
+set -e
+
+# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
+if [ "$IGT_FORCE_DRIVER" = "amdgpu" ]; then
+ mv /install/modules/lib/modules/* /lib/modules/.
+ modprobe amdgpu
+fi
+
+if [ -e "/install/xfails/$DRIVER_NAME-$GPU_VERSION-skips.txt" ]; then
+ IGT_SKIPS="--skips /install/xfails/$DRIVER_NAME-$GPU_VERSION-skips.txt"
+fi
+
+if [ -e "/install/xfails/$DRIVER_NAME-$GPU_VERSION-flakes.txt" ]; then
+ IGT_FLAKES="--flakes /install/xfails/$DRIVER_NAME-$GPU_VERSION-flakes.txt"
+fi
+
+if [ -e "/install/xfails/$DRIVER_NAME-$GPU_VERSION-fails.txt" ]; then
+ IGT_FAILS="--baseline /install/xfails/$DRIVER_NAME-$GPU_VERSION-fails.txt"
+fi
+
+if [ "`uname -m`" = "aarch64" ]; then
+ ARCH="arm64"
+elif [ "`uname -m`" = "armv7l" ]; then
+ ARCH="arm"
+else
+ ARCH="x86_64"
+fi
+
+curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s ${FDO_HTTP_CACHE_URI:-}$PIPELINE_ARTIFACTS_BASE/$ARCH/igt.tar.gz | tar --zstd -v -x -C /
+
+set +e
+igt-runner \
+ run \
+ --igt-folder /igt/libexec/igt-gpu-tools \
+ --caselist /install/testlist.txt \
+ --output /results \
+ $IGT_SKIPS \
+ $IGT_FLAKES \
+ $IGT_FAILS \
+ --fraction-start $CI_NODE_INDEX \
+ --fraction $CI_NODE_TOTAL \
+ --jobs 1
+ret=$?
+set -e
+
+deqp-runner junit \
+ --testsuite IGT \
+ --results /results/failures.csv \
+ --output /results/junit.xml \
+ --limit 50 \
+ --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
+
+# Also store the results in the simpler format used by the runner in ChromeOS CI
+#sed -r 's/(dmesg-warn|pass)/success/g' /results/results.txt > /results/results_simple.txt
+
+cd $oldpath
+exit $ret
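
The xfails wiring above can also be exercised outside the CI job. A minimal sketch, reusing only igt-runner options that already appear in the script; the DRIVER_NAME/GPU_VERSION values are taken from the msm:sc7180 job in test.yml, while ~/igt, ~/results and ~/xfails are hypothetical local stand-ins for the /igt, /results and /install/xfails locations the test rootfs provides:

#!/bin/sh
# Local sketch of the skip/flake/baseline wiring used by igt_runner.sh.
# All paths below are hypothetical; point them at a local IGT build and checkout.
DRIVER_NAME=msm
GPU_VERSION=sc7180
XFAILS=~/xfails
IGT_SKIPS=""
IGT_FLAKES=""
IGT_FAILS=""

# Pass the expectation lists only if they exist, mirroring the job script.
[ -e "$XFAILS/$DRIVER_NAME-$GPU_VERSION-skips.txt" ]  && IGT_SKIPS="--skips $XFAILS/$DRIVER_NAME-$GPU_VERSION-skips.txt"
[ -e "$XFAILS/$DRIVER_NAME-$GPU_VERSION-flakes.txt" ] && IGT_FLAKES="--flakes $XFAILS/$DRIVER_NAME-$GPU_VERSION-flakes.txt"
[ -e "$XFAILS/$DRIVER_NAME-$GPU_VERSION-fails.txt" ]  && IGT_FAILS="--baseline $XFAILS/$DRIVER_NAME-$GPU_VERSION-fails.txt"

igt-runner run \
    --igt-folder ~/igt/libexec/igt-gpu-tools \
    --caselist drivers/gpu/drm/ci/testlist.txt \
    --output ~/results \
    $IGT_SKIPS $IGT_FLAKES $IGT_FAILS \
    --jobs 1
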
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
new file mode 100644
index 000000000000..f051b6c547c5
--- /dev/null
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -0,0 +1,15 @@
+variables:
+ CONTAINER_TAG: "2023-08-10-mesa-uprev"
+ DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
+ DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
+
+ DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
+ DEBIAN_BUILD_TAG: "${CONTAINER_TAG}"
+
+ KERNEL_ROOTFS_TAG: "${CONTAINER_TAG}"
+
+ DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
+ DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
+ DEBIAN_X86_64_TEST_GL_TAG: "${CONTAINER_TAG}"
+
+ ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}" \ No newline at end of file
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
new file mode 100755
index 000000000000..0c4456b21b0f
--- /dev/null
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+
+set -e
+set -x
+
+# Try to use the kernel and rootfs built in mainline first, so we're more
+# likely to hit cache
+if curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
+ BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
+else
+ BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
+fi
+
+rm -rf results
+mkdir -p results/job-rootfs-overlay/
+
+cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
+cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
+cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
+cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
+
+# Prepare env vars for upload.
+section_start variables "Variables passed through:"
+KERNEL_IMAGE_BASE_URL="https://${BASE_SYSTEM_HOST_PATH}" \
+ artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
+section_end variables
+
+tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
+ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
+
+touch results/lava.log
+tail -f results/lava.log &
+
+PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
+ submit \
+ --dump-yaml \
+ --pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
+ --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
+ --kernel-url-prefix "https://${PIPELINE_ARTIFACTS_BASE}/${ARCH}" \
+ --build-url "${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${ARCH}/kernel-files.tar.zst" \
+ --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
+ --job-timeout-min ${JOB_TIMEOUT:-80} \
+ --first-stage-init artifacts/ci-common/init-stage1.sh \
+ --ci-project-dir "${CI_PROJECT_DIR}" \
+ --device-type "${DEVICE_TYPE}" \
+ --dtb-filename "${DTB}" \
+ --jwt-file "${CI_JOB_JWT_FILE}" \
+ --kernel-image-name "${KERNEL_IMAGE_NAME}" \
+ --kernel-image-type "${KERNEL_IMAGE_TYPE}" \
+ --boot-method "${BOOT_METHOD}" \
+ --visibility-group "${VISIBILITY_GROUP}" \
+ --lava-tags "${LAVA_TAGS}" \
+ --mesa-job-name "$CI_JOB_NAME" \
+ --structured-log-file "results/lava_job_detail.json" \
+ --ssh-client-image "${LAVA_SSH_CLIENT_IMAGE}" \
+ >> results/lava.log
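
When debugging a submission, the overlay tarball assembled above can be inspected before the s3cp upload; a small sketch, run from the same working directory the script uses (the filename matches the tar step above):

#!/bin/sh
# List the contents of the rootfs overlay that lava-submit.sh uploads.
# Expected entries, per the cp steps above: capture-devcoredump.sh, init-*.sh,
# intel-gpu-freq.sh, setup-test-env.sh and set-job-env-vars.sh.
tar ztf job-rootfs-overlay.tar.gz
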
diff --git a/drivers/gpu/drm/ci/static-checks.yml b/drivers/gpu/drm/ci/static-checks.yml
new file mode 100644
index 000000000000..13ffa827b7fa
--- /dev/null
+++ b/drivers/gpu/drm/ci/static-checks.yml
@@ -0,0 +1,12 @@
+check-patch:
+ extends:
+ - .build
+ - .use-debian/x86_64_build
+ script:
+ - drivers/gpu/drm/ci/check-patch.py
+ variables:
+ CHECKPATCH_TYPES: "BAD_SIGN_OFF,BAD_STABLE_ADDRESS_STYLE,COMMIT_COMMENT_SYMBOL,COMMIT_MESSAGE,EMAIL_SUBJECT,FROM_SIGN_OFF_MISMATCH,MISSING_SIGN_OFF,NO_AUTHOR_SIGN_OFF,DIFF_IN_COMMIT_MSG,GERRIT_CHANGE_ID,GIT_COMMIT_ID,UNKNOWN_COMMIT_ID,CODE_INDENT,BIT_MACRO,DOS_LINE_ENDINGS"
+ rules:
+ - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
+ when: on_success
+ # Other cases default to never
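
The same checks can be approximated locally. The job drives checkpatch through drivers/gpu/drm/ci/check-patch.py, so this is only an approximation: a sketch that invokes scripts/checkpatch.pl directly from a kernel tree with the same type list (checking HEAD is just an example):

#!/bin/sh
# Check the commit at HEAD, restricting checkpatch output to the message types
# listed in CHECKPATCH_TYPES above.
./scripts/checkpatch.pl \
    --git HEAD \
    --types "BAD_SIGN_OFF,BAD_STABLE_ADDRESS_STYLE,COMMIT_COMMENT_SYMBOL,COMMIT_MESSAGE,EMAIL_SUBJECT,FROM_SIGN_OFF_MISMATCH,MISSING_SIGN_OFF,NO_AUTHOR_SIGN_OFF,DIFF_IN_COMMIT_MSG,GERRIT_CHANGE_ID,GIT_COMMIT_ID,UNKNOWN_COMMIT_ID,CODE_INDENT,BIT_MACRO,DOS_LINE_ENDINGS"
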
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
new file mode 100644
index 000000000000..6473cddaa7a9
--- /dev/null
+++ b/drivers/gpu/drm/ci/test.yml
@@ -0,0 +1,335 @@
+.test-rules:
+ rules:
+ - if: '$FD_FARM == "offline" && $RUNNER_TAG =~ /^google-freedreno-/'
+ when: never
+ - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
+ when: never
+ - !reference [.no_scheduled_pipelines-rules, rules]
+ - when: on_success
+
+.lava-test:
+ extends:
+ - .test-rules
+ script:
+ # Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
+ - rm -rf install
+ - tar -xf artifacts/install.tar
+ - mv install/* artifacts/.
+ # Override it with our lava-submit.sh script
+ - ./artifacts/lava-submit.sh
+
+.lava-igt:arm32:
+ extends:
+ - .lava-test:arm32
+ variables:
+ HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
+ ARCH: "armhf"
+ dependencies:
+ - testing:arm32
+ needs:
+ - alpine/x86_64_lava_ssh_client
+ - kernel+rootfs_arm32
+ - debian/x86_64_build
+ - testing:arm32
+ - igt:arm32
+
+.lava-igt:arm64:
+ extends:
+ - .lava-test:arm64
+ variables:
+ HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
+ ARCH: "arm64"
+ dependencies:
+ - testing:arm64
+ needs:
+ - alpine/x86_64_lava_ssh_client
+ - kernel+rootfs_arm64
+ - debian/x86_64_build
+ - testing:arm64
+ - igt:arm64
+
+.lava-igt:x86_64:
+ extends:
+ - .lava-test:x86_64
+ variables:
+ HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
+ ARCH: "x86_64"
+ dependencies:
+ - testing:x86_64
+ needs:
+ - alpine/x86_64_lava_ssh_client
+ - kernel+rootfs_x86_64
+ - debian/x86_64_build
+ - testing:x86_64
+ - igt:x86_64
+
+.baremetal-igt-arm64:
+ extends:
+ - .baremetal-test-arm64
+ - .use-debian/arm64_test
+ - .test-rules
+ variables:
+ FDO_CI_CONCURRENT: 10
+ HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
+ S3_ARTIFACT_NAME: "arm64/kernel-files"
+ BM_KERNEL: https://${PIPELINE_ARTIFACTS_BASE}/arm64/Image.gz
+ BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
+ needs:
+ - debian/arm64_test
+ - job: testing:arm64
+ artifacts: false
+ - igt:arm64
+ tags:
+ - $RUNNER_TAG
+
+msm:sc7180:
+ extends:
+ - .lava-igt:arm64
+ stage: msm
+ parallel: 2
+ variables:
+ DRIVER_NAME: msm
+ DEVICE_TYPE: sc7180-trogdor-lazor-limozeen
+ DTB: sc7180-trogdor-lazor-limozeen-nots-r5
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+ GPU_VERSION: sc7180
+ RUNNER_TAG: mesa-ci-x86-64-lava-sc7180-trogdor-lazor-limozeen
+
+msm:apq8016:
+ extends:
+ - .baremetal-igt-arm64
+ stage: msm
+ variables:
+ DRIVER_NAME: msm
+ BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc.dtb
+ GPU_VERSION: apq8016
+ BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
+ RUNNER_TAG: google-freedreno-db410c
+ script:
+ - ./install/bare-metal/fastboot.sh
+ rules:
+ # TODO: current issue: it is not finding the NFS root. Fix and remove this rule.
+ - when: never
+
+msm:apq8096:
+ extends:
+ - .baremetal-igt-arm64
+ stage: msm
+ variables:
+ DRIVER_NAME: msm
+ BM_KERNEL_EXTRA_ARGS: maxcpus=2
+ BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8096-db820c.dtb
+ GPU_VERSION: apq8096
+ RUNNER_TAG: google-freedreno-db820c
+ script:
+ - ./install/bare-metal/fastboot.sh
+
+msm:sdm845:
+ extends:
+ - .baremetal-igt-arm64
+ stage: msm
+ parallel: 6
+ variables:
+ DRIVER_NAME: msm
+ BM_KERNEL: https://${PIPELINE_ARTIFACTS_BASE}/arm64/cheza-kernel
+ GPU_VERSION: sdm845
+ RUNNER_TAG: google-freedreno-cheza
+ script:
+ - ./install/bare-metal/cros-servo.sh
+
+rockchip:rk3288:
+ extends:
+ - .lava-igt:arm32
+ stage: rockchip
+ variables:
+ DRIVER_NAME: rockchip
+ DEVICE_TYPE: rk3288-veyron-jaq
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: "zimage"
+ GPU_VERSION: rk3288
+ RUNNER_TAG: mesa-ci-x86-64-lava-rk3288-veyron-jaq
+
+rockchip:rk3399:
+ extends:
+ - .lava-igt:arm64
+ stage: rockchip
+ parallel: 3
+ variables:
+ DRIVER_NAME: rockchip
+ DEVICE_TYPE: rk3399-gru-kevin
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+ GPU_VERSION: rk3399
+ RUNNER_TAG: mesa-ci-x86-64-lava-rk3399-gru-kevin
+
+.i915:
+ extends:
+ - .lava-igt:x86_64
+ stage: i915
+ variables:
+ DRIVER_NAME: i915
+ DTB: ""
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+
+i915:apl:
+ extends:
+ - .i915
+ parallel: 12
+ variables:
+ DEVICE_TYPE: asus-C523NA-A20057-coral
+ GPU_VERSION: apl
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C523NA-A20057-coral
+
+i915:glk:
+ extends:
+ - .i915
+ parallel: 5
+ variables:
+ DEVICE_TYPE: hp-x360-12b-ca0010nr-n4020-octopus
+ GPU_VERSION: glk
+ RUNNER_TAG: mesa-ci-x86-64-lava-hp-x360-12b-ca0010nr-n4020-octopus
+
+i915:amly:
+ extends:
+ - .i915
+ parallel: 8
+ variables:
+ DEVICE_TYPE: asus-C433TA-AJ0005-rammus
+ GPU_VERSION: amly
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C433TA-AJ0005-rammus
+
+i915:kbl:
+ extends:
+ - .i915
+ parallel: 5
+ variables:
+ DEVICE_TYPE: hp-x360-14-G1-sona
+ GPU_VERSION: kbl
+ RUNNER_TAG: mesa-ci-x86-64-lava-hp-x360-14-G1-sona
+
+i915:whl:
+ extends:
+ - .i915
+ parallel: 8
+ variables:
+ DEVICE_TYPE: dell-latitude-5400-8665U-sarien
+ GPU_VERSION: whl
+ RUNNER_TAG: mesa-ci-x86-64-lava-dell-latitude-5400-8665U-sarien
+
+i915:cml:
+ extends:
+ - .i915
+ parallel: 6
+ variables:
+ DEVICE_TYPE: asus-C436FA-Flip-hatch
+ GPU_VERSION: cml
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-C436FA-flip-hatch
+
+i915:tgl:
+ extends:
+ - .i915
+ parallel: 6
+ variables:
+ DEVICE_TYPE: asus-cx9400-volteer
+ GPU_VERSION: tgl
+ RUNNER_TAG: mesa-ci-x86-64-lava-asus-cx9400-volteer
+
+.amdgpu:
+ extends:
+ - .lava-igt:x86_64
+ stage: amdgpu
+ variables:
+ DRIVER_NAME: amdgpu
+ DTB: ""
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+
+amdgpu:stoney:
+ extends:
+ - .amdgpu
+ variables:
+ DEVICE_TYPE: hp-11A-G6-EE-grunt
+ GPU_VERSION: stoney
+ RUNNER_TAG: mesa-ci-x86-64-lava-hp-11A-G6-EE-grunt
+
+.mediatek:
+ extends:
+ - .lava-igt:arm64
+ stage: mediatek
+ variables:
+ DRIVER_NAME: mediatek
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: depthcharge
+ KERNEL_IMAGE_TYPE: ""
+
+mediatek:mt8173:
+ extends:
+ - .mediatek
+ variables:
+ DEVICE_TYPE: mt8173-elm-hana
+ GPU_VERSION: mt8173
+ RUNNER_TAG: mesa-ci-x86-64-lava-mt8173-elm-hana
+ rules:
+ # TODO: current issue: device is hanging. Fix and remove this rule.
+ - when: never
+
+mediatek:mt8183:
+ extends:
+ - .mediatek
+ variables:
+ DEVICE_TYPE: mt8183-kukui-jacuzzi-juniper-sku16
+ GPU_VERSION: mt8183
+ RUNNER_TAG: mesa-ci-x86-64-lava-mt8183-kukui-jacuzzi-juniper-sku16
+
+# drm-mtk doesn't even probe yet in mainline for mt8192
+.mediatek:mt8192:
+ extends:
+ - .mediatek
+ variables:
+ DEVICE_TYPE: mt8192-asurada-spherion-r0
+ GPU_VERSION: mt8192
+ RUNNER_TAG: mesa-ci-x86-64-lava-mt8192-asurada-spherion-r0
+
+.meson:
+ extends:
+ - .lava-igt:arm64
+ stage: meson
+ variables:
+ DRIVER_NAME: meson
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: u-boot
+ KERNEL_IMAGE_TYPE: "image"
+
+meson:g12b:
+ extends:
+ - .meson
+ variables:
+ DEVICE_TYPE: meson-g12b-a311d-khadas-vim3
+ GPU_VERSION: g12b
+ RUNNER_TAG: mesa-ci-x86-64-lava-meson-g12b-a311d-khadas-vim3
+
+virtio_gpu:none:
+ stage: virtio-gpu
+ variables:
+ CROSVM_GALLIUM_DRIVER: llvmpipe
+ DRIVER_NAME: virtio_gpu
+ GPU_VERSION: none
+ extends:
+ - .test-gl
+ tags:
+ - kvm
+ script:
+ - ln -sf $CI_PROJECT_DIR/install /install
+ - mv install/bzImage /lava-files/bzImage
+ - install/crosvm-runner.sh install/igt_runner.sh
+ needs:
+ - debian/x86_64_test-gl
+ - testing:x86_64
+ - igt:x86_64
+ rules:
+ # TODO: current issue: malloc(): corrupted top size. Fix and remove this rule.
+ - when: never \ No newline at end of file
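
Each job's DRIVER_NAME/GPU_VERSION pair is what selects the expectation files igt_runner.sh picks up from /install/xfails/. A small sketch that prints the filenames a given job maps to, assuming the expectation lists are kept under drivers/gpu/drm/ci/xfails/ in the tree; the msm:sc7180 values are taken from the job above:

#!/bin/sh
# Show which optional expectation files a DRIVER_NAME/GPU_VERSION pair maps to.
DRIVER_NAME=msm
GPU_VERSION=sc7180
for kind in skips flakes fails; do
    f="drivers/gpu/drm/ci/xfails/$DRIVER_NAME-$GPU_VERSION-$kind.txt"
    if [ -e "$f" ]; then
        echo "found:              $f"
    else
        echo "missing (optional): $f"
    fi
done
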
diff --git a/drivers/gpu/drm/ci/testlist.txt b/drivers/gpu/drm/ci/testlist.txt
new file mode 100644
index 000000000000..f82cd90372f4
--- /dev/null
+++ b/drivers/gpu/drm/ci/testlist.txt
@@ -0,0 +1,2912 @@
+core_auth@getclient-simple
+core_auth@getclient-master-drop
+core_auth@basic-auth
+core_auth@many-magics
+core_getclient
+core_getstats
+core_getversion
+core_setmaster_vs_auth
+drm_read@invalid-buffer
+drm_read@fault-buffer
+drm_read@empty-block
+drm_read@empty-nonblock
+drm_read@short-buffer-block
+drm_read@short-buffer-nonblock
+drm_read@short-buffer-wakeup
+gem_eio@throttle
+gem_eio@create
+gem_eio@create-ext
+gem_eio@context-create
+gem_eio@execbuf
+gem_eio@banned
+gem_eio@suspend
+gem_eio@hibernate
+gem_eio@in-flight-external
+gem_eio@in-flight-suspend
+gem_eio@reset-stress
+gem_eio@unwedge-stress
+gem_eio@wait-immediate
+gem_eio@wait-wedge-immediate
+gem_eio@in-flight-immediate
+gem_eio@in-flight-contexts-immediate
+gem_eio@in-flight-internal-immediate
+gem_eio@wait-1us
+gem_eio@wait-wedge-1us
+gem_eio@in-flight-1us
+gem_eio@in-flight-contexts-1us
+gem_eio@in-flight-internal-1us
+gem_eio@wait-10ms
+gem_eio@wait-wedge-10ms
+gem_eio@in-flight-10ms
+gem_eio@in-flight-contexts-10ms
+gem_eio@in-flight-internal-10ms
+gem_eio@kms
+kms_3d
+kms_addfb_basic@unused-handle
+kms_addfb_basic@unused-pitches
+kms_addfb_basic@unused-offsets
+kms_addfb_basic@unused-modifier
+kms_addfb_basic@clobberred-modifier
+kms_addfb_basic@invalid-smem-bo-on-discrete
+kms_addfb_basic@legacy-format
+kms_addfb_basic@no-handle
+kms_addfb_basic@basic
+kms_addfb_basic@bad-pitch-0
+kms_addfb_basic@bad-pitch-32
+kms_addfb_basic@bad-pitch-63
+kms_addfb_basic@bad-pitch-128
+kms_addfb_basic@bad-pitch-256
+kms_addfb_basic@bad-pitch-1024
+kms_addfb_basic@bad-pitch-999
+kms_addfb_basic@bad-pitch-65536
+kms_addfb_basic@invalid-get-prop-any
+kms_addfb_basic@invalid-get-prop
+kms_addfb_basic@invalid-set-prop-any
+kms_addfb_basic@invalid-set-prop
+kms_addfb_basic@master-rmfb
+kms_addfb_basic@addfb25-modifier-no-flag
+kms_addfb_basic@addfb25-bad-modifier
+kms_addfb_basic@addfb25-x-tiled-mismatch-legacy
+kms_addfb_basic@addfb25-x-tiled-legacy
+kms_addfb_basic@addfb25-framebuffer-vs-set-tiling
+kms_addfb_basic@basic-x-tiled-legacy
+kms_addfb_basic@framebuffer-vs-set-tiling
+kms_addfb_basic@tile-pitch-mismatch
+kms_addfb_basic@basic-y-tiled-legacy
+kms_addfb_basic@size-max
+kms_addfb_basic@too-wide
+kms_addfb_basic@too-high
+kms_addfb_basic@bo-too-small
+kms_addfb_basic@small-bo
+kms_addfb_basic@bo-too-small-due-to-tiling
+kms_addfb_basic@addfb25-y-tiled-legacy
+kms_addfb_basic@addfb25-yf-tiled-legacy
+kms_addfb_basic@addfb25-y-tiled-small-legacy
+kms_addfb_basic@addfb25-4-tiled
+kms_async_flips@async-flip-with-page-flip-events
+kms_async_flips@alternate-sync-async-flip
+kms_async_flips@test-time-stamp
+kms_async_flips@test-cursor
+kms_async_flips@invalid-async-flip
+kms_async_flips@crc
+kms_atomic@plane-overlay-legacy
+kms_atomic@plane-primary-legacy
+kms_atomic@plane-primary-overlay-mutable-zpos
+kms_atomic@plane-immutable-zpos
+kms_atomic@test-only
+kms_atomic@plane-cursor-legacy
+kms_atomic@plane-invalid-params
+kms_atomic@plane-invalid-params-fence
+kms_atomic@crtc-invalid-params
+kms_atomic@crtc-invalid-params-fence
+kms_atomic@atomic-invalid-params
+kms_atomic@atomic_plane_damage
+kms_atomic_interruptible@legacy-setmode
+kms_atomic_interruptible@atomic-setmode
+kms_atomic_interruptible@legacy-dpms
+kms_atomic_interruptible@legacy-pageflip
+kms_atomic_interruptible@legacy-cursor
+kms_atomic_interruptible@universal-setplane-primary
+kms_atomic_interruptible@universal-setplane-cursor
+kms_atomic_transition@plane-primary-toggle-with-vblank-wait
+kms_atomic_transition@plane-all-transition
+kms_atomic_transition@plane-all-transition-fencing
+kms_atomic_transition@plane-all-transition-nonblocking
+kms_atomic_transition@plane-all-transition-nonblocking-fencing
+kms_atomic_transition@plane-use-after-nonblocking-unbind
+kms_atomic_transition@plane-use-after-nonblocking-unbind-fencing
+kms_atomic_transition@plane-all-modeset-transition
+kms_atomic_transition@plane-all-modeset-transition-fencing
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+kms_atomic_transition@plane-all-modeset-transition-fencing-internal-panels
+kms_atomic_transition@plane-toggle-modeset-transition
+kms_atomic_transition@modeset-transition
+kms_atomic_transition@modeset-transition-fencing
+kms_atomic_transition@modeset-transition-nonblocking
+kms_atomic_transition@modeset-transition-nonblocking-fencing
+kms_big_fb@x-tiled-addfb-size-overflow
+kms_big_fb@y-tiled-addfb-size-overflow
+kms_big_fb@yf-tiled-addfb-size-overflow
+kms_big_fb@4-tiled-addfb-size-overflow
+kms_big_fb@x-tiled-addfb-size-offset-overflow
+kms_big_fb@y-tiled-addfb-size-offset-overflow
+kms_big_fb@yf-tiled-addfb-size-offset-overflow
+kms_big_fb@4-tiled-addfb-size-offset-overflow
+kms_big_fb@linear-addfb
+kms_big_fb@x-tiled-addfb
+kms_big_fb@y-tiled-addfb
+kms_big_fb@yf-tiled-addfb
+kms_big_fb@4-tiled-addfb
+kms_big_fb@linear-8bpp-rotate-0
+kms_big_fb@linear-8bpp-rotate-90
+kms_big_fb@linear-8bpp-rotate-180
+kms_big_fb@linear-8bpp-rotate-270
+kms_big_fb@linear-16bpp-rotate-0
+kms_big_fb@linear-16bpp-rotate-90
+kms_big_fb@linear-16bpp-rotate-180
+kms_big_fb@linear-16bpp-rotate-270
+kms_big_fb@linear-32bpp-rotate-0
+kms_big_fb@linear-32bpp-rotate-90
+kms_big_fb@linear-32bpp-rotate-180
+kms_big_fb@linear-32bpp-rotate-270
+kms_big_fb@linear-64bpp-rotate-0
+kms_big_fb@linear-64bpp-rotate-90
+kms_big_fb@linear-64bpp-rotate-180
+kms_big_fb@linear-64bpp-rotate-270
+kms_big_fb@x-tiled-8bpp-rotate-0
+kms_big_fb@x-tiled-8bpp-rotate-90
+kms_big_fb@x-tiled-8bpp-rotate-180
+kms_big_fb@x-tiled-8bpp-rotate-270
+kms_big_fb@x-tiled-16bpp-rotate-0
+kms_big_fb@x-tiled-16bpp-rotate-90
+kms_big_fb@x-tiled-16bpp-rotate-180
+kms_big_fb@x-tiled-16bpp-rotate-270
+kms_big_fb@x-tiled-32bpp-rotate-0
+kms_big_fb@x-tiled-32bpp-rotate-90
+kms_big_fb@x-tiled-32bpp-rotate-180
+kms_big_fb@x-tiled-32bpp-rotate-270
+kms_big_fb@x-tiled-64bpp-rotate-0
+kms_big_fb@x-tiled-64bpp-rotate-90
+kms_big_fb@x-tiled-64bpp-rotate-180
+kms_big_fb@x-tiled-64bpp-rotate-270
+kms_big_fb@y-tiled-8bpp-rotate-0
+kms_big_fb@y-tiled-8bpp-rotate-90
+kms_big_fb@y-tiled-8bpp-rotate-180
+kms_big_fb@y-tiled-8bpp-rotate-270
+kms_big_fb@y-tiled-16bpp-rotate-0
+kms_big_fb@y-tiled-16bpp-rotate-90
+kms_big_fb@y-tiled-16bpp-rotate-180
+kms_big_fb@y-tiled-16bpp-rotate-270
+kms_big_fb@y-tiled-32bpp-rotate-0
+kms_big_fb@y-tiled-32bpp-rotate-90
+kms_big_fb@y-tiled-32bpp-rotate-180
+kms_big_fb@y-tiled-32bpp-rotate-270
+kms_big_fb@y-tiled-64bpp-rotate-0
+kms_big_fb@y-tiled-64bpp-rotate-90
+kms_big_fb@y-tiled-64bpp-rotate-180
+kms_big_fb@y-tiled-64bpp-rotate-270
+kms_big_fb@yf-tiled-8bpp-rotate-0
+kms_big_fb@yf-tiled-8bpp-rotate-90
+kms_big_fb@yf-tiled-8bpp-rotate-180
+kms_big_fb@yf-tiled-8bpp-rotate-270
+kms_big_fb@yf-tiled-16bpp-rotate-0
+kms_big_fb@yf-tiled-16bpp-rotate-90
+kms_big_fb@yf-tiled-16bpp-rotate-180
+kms_big_fb@yf-tiled-16bpp-rotate-270
+kms_big_fb@yf-tiled-32bpp-rotate-0
+kms_big_fb@yf-tiled-32bpp-rotate-90
+kms_big_fb@yf-tiled-32bpp-rotate-180
+kms_big_fb@yf-tiled-32bpp-rotate-270
+kms_big_fb@yf-tiled-64bpp-rotate-0
+kms_big_fb@yf-tiled-64bpp-rotate-90
+kms_big_fb@yf-tiled-64bpp-rotate-180
+kms_big_fb@yf-tiled-64bpp-rotate-270
+kms_big_fb@4-tiled-8bpp-rotate-0
+kms_big_fb@4-tiled-8bpp-rotate-90
+kms_big_fb@4-tiled-8bpp-rotate-180
+kms_big_fb@4-tiled-8bpp-rotate-270
+kms_big_fb@4-tiled-16bpp-rotate-0
+kms_big_fb@4-tiled-16bpp-rotate-90
+kms_big_fb@4-tiled-16bpp-rotate-180
+kms_big_fb@4-tiled-16bpp-rotate-270
+kms_big_fb@4-tiled-32bpp-rotate-0
+kms_big_fb@4-tiled-32bpp-rotate-90
+kms_big_fb@4-tiled-32bpp-rotate-180
+kms_big_fb@4-tiled-32bpp-rotate-270
+kms_big_fb@4-tiled-64bpp-rotate-0
+kms_big_fb@4-tiled-64bpp-rotate-90
+kms_big_fb@4-tiled-64bpp-rotate-180
+kms_big_fb@4-tiled-64bpp-rotate-270
+kms_big_fb@linear-max-hw-stride-32bpp-rotate-0
+kms_big_fb@linear-max-hw-stride-32bpp-rotate-180
+kms_big_fb@linear-max-hw-stride-64bpp-rotate-0
+kms_big_fb@linear-max-hw-stride-64bpp-rotate-180
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0-async-flip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-180
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-180-async-flip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0-async-flip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180-async-flip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0-hflip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-180-hflip
+kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-180-hflip-async-flip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0-hflip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180-hflip
+kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180-hflip-async-flip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0-async-flip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-async-flip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-async-flip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180-async-flip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0-hflip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-hflip
+kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-hflip-async-flip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-hflip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180-hflip
+kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180-hflip-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-0
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-0-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180-hflip
+kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-180-hflip-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-0-hflip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-hflip
+kms_big_fb@yf-tiled-max-hw-stride-64bpp-rotate-180-hflip-async-flip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-async-flip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-180
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-180-async-flip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-async-flip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-async-flip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-hflip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-0-hflip-async-flip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-180-hflip
+kms_big_fb@4-tiled-max-hw-stride-32bpp-rotate-180-hflip-async-flip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip-async-flip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip
+kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180-hflip-async-flip
+kms_big_joiner@basic
+kms_big_joiner@invalid-modeset
+kms_big_joiner@2x-modeset
+kms_busy@basic
+kms_busy@basic-hang
+kms_busy@extended-pageflip-modeset-hang-oldfb
+kms_busy@extended-pageflip-hang-oldfb
+kms_busy@extended-pageflip-hang-newfb
+kms_busy@extended-modeset-hang-oldfb
+kms_busy@extended-modeset-hang-newfb
+kms_busy@extended-modeset-hang-oldfb-with-reset
+kms_busy@extended-modeset-hang-newfb-with-reset
+kms_bw@linear-tiling-1-displays-1920x1080p
+kms_bw@linear-tiling-1-displays-2560x1440p
+kms_bw@linear-tiling-1-displays-3840x2160p
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-2560x1440p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_bw@linear-tiling-5-displays-1920x1080p
+kms_bw@linear-tiling-5-displays-2560x1440p
+kms_bw@linear-tiling-5-displays-3840x2160p
+kms_bw@linear-tiling-6-displays-1920x1080p
+kms_bw@linear-tiling-6-displays-2560x1440p
+kms_bw@linear-tiling-6-displays-3840x2160p
+kms_bw@linear-tiling-7-displays-1920x1080p
+kms_bw@linear-tiling-7-displays-2560x1440p
+kms_bw@linear-tiling-7-displays-3840x2160p
+kms_bw@linear-tiling-8-displays-1920x1080p
+kms_bw@linear-tiling-8-displays-2560x1440p
+kms_bw@linear-tiling-8-displays-3840x2160p
+kms_ccs@pipe-A-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-A-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-A-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-A-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-A-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-A-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-A-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-B-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-B-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-B-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-B-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-B-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-B-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-C-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-C-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-C-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-C-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-C-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-C-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-D-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-D-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-D-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-D-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-D-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-D-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-E-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-E-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-E-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-E-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-E-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-E-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-F-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-F-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-F-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-F-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-F-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-F-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-G-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-G-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-G-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-G-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-G-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-G-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-bad-pixel-format-y_tiled_ccs
+kms_ccs@pipe-H-bad-pixel-format-yf_tiled_ccs
+kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-bad-pixel-format-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-bad-pixel-format-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-bad-rotation-90-y_tiled_ccs
+kms_ccs@pipe-H-bad-rotation-90-yf_tiled_ccs
+kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-bad-rotation-90-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-bad-rotation-90-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-basic-y_tiled_ccs
+kms_ccs@pipe-H-crc-primary-basic-yf_tiled_ccs
+kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-crc-primary-basic-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-yf_tiled_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-rotation-180-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-crc-primary-rotation-180-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-random-ccs-data-y_tiled_ccs
+kms_ccs@pipe-H-random-ccs-data-yf_tiled_ccs
+kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-random-ccs-data-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-random-ccs-data-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-yf_tiled_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-missing-ccs-buffer-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-missing-ccs-buffer-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-yf_tiled_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-ccs-on-another-bo-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-ccs-on-another-bo-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-bad-aux-stride-y_tiled_ccs
+kms_ccs@pipe-H-bad-aux-stride-yf_tiled_ccs
+kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-bad-aux-stride-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-bad-aux-stride-4_tiled_mtl_rc_ccs_cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-yf_tiled_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_dg2_rc_ccs_cc
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_mc_ccs
+kms_ccs@pipe-H-crc-sprite-planes-basic-4_tiled_mtl_rc_ccs_cc
+kms_cdclk@plane-scaling
+kms_cdclk@mode-transition
+kms_cdclk@mode-transition-all-outputs
+kms_color@degamma
+kms_color@gamma
+kms_color@legacy-gamma
+kms_color@legacy-gamma-reset
+kms_color@ctm-red-to-blue
+kms_color@ctm-green-to-red
+kms_color@ctm-blue-to-red
+kms_color@ctm-max
+kms_color@ctm-negative
+kms_color@ctm-0-25
+kms_color@ctm-0-50
+kms_color@ctm-0-75
+kms_color@ctm-signed
+kms_color@deep-color
+kms_color@invalid-gamma-lut-sizes
+kms_color@invalid-degamma-lut-sizes
+kms_color@invalid-ctm-matrix-sizes
+kms_concurrent@pipe-A
+kms_concurrent@pipe-B
+kms_concurrent@pipe-C
+kms_concurrent@pipe-D
+kms_concurrent@pipe-E
+kms_concurrent@pipe-F
+kms_concurrent@pipe-G
+kms_concurrent@pipe-H
+kms_content_protection@legacy
+kms_content_protection@atomic
+kms_content_protection@atomic-dpms
+kms_content_protection@LIC
+kms_content_protection@type1
+kms_content_protection@mei_interface
+kms_content_protection@content_type_change
+kms_content_protection@uevent
+kms_content_protection@srm
+kms_content_protection@dp-mst-type-0
+kms_content_protection@dp-mst-lic-type-0
+kms_content_protection@dp-mst-type-1
+kms_content_protection@dp-mst-lic-type-1
+kms_cursor_crc@cursor-size-change
+kms_cursor_crc@cursor-alpha-opaque
+kms_cursor_crc@cursor-alpha-transparent
+kms_cursor_crc@cursor-dpms
+kms_cursor_crc@cursor-suspend
+kms_cursor_crc@cursor-onscreen-32x32
+kms_cursor_crc@cursor-offscreen-32x32
+kms_cursor_crc@cursor-sliding-32x32
+kms_cursor_crc@cursor-random-32x32
+kms_cursor_crc@cursor-rapid-movement-32x32
+kms_cursor_crc@cursor-onscreen-32x10
+kms_cursor_crc@cursor-offscreen-32x10
+kms_cursor_crc@cursor-sliding-32x10
+kms_cursor_crc@cursor-random-32x10
+kms_cursor_crc@cursor-rapid-movement-32x10
+kms_cursor_crc@cursor-onscreen-64x64
+kms_cursor_crc@cursor-offscreen-64x64
+kms_cursor_crc@cursor-sliding-64x64
+kms_cursor_crc@cursor-random-64x64
+kms_cursor_crc@cursor-rapid-movement-64x64
+kms_cursor_crc@cursor-onscreen-64x21
+kms_cursor_crc@cursor-offscreen-64x21
+kms_cursor_crc@cursor-sliding-64x21
+kms_cursor_crc@cursor-random-64x21
+kms_cursor_crc@cursor-rapid-movement-64x21
+kms_cursor_crc@cursor-onscreen-128x128
+kms_cursor_crc@cursor-offscreen-128x128
+kms_cursor_crc@cursor-sliding-128x128
+kms_cursor_crc@cursor-random-128x128
+kms_cursor_crc@cursor-rapid-movement-128x128
+kms_cursor_crc@cursor-onscreen-128x42
+kms_cursor_crc@cursor-offscreen-128x42
+kms_cursor_crc@cursor-sliding-128x42
+kms_cursor_crc@cursor-random-128x42
+kms_cursor_crc@cursor-rapid-movement-128x42
+kms_cursor_crc@cursor-onscreen-256x256
+kms_cursor_crc@cursor-offscreen-256x256
+kms_cursor_crc@cursor-sliding-256x256
+kms_cursor_crc@cursor-random-256x256
+kms_cursor_crc@cursor-rapid-movement-256x256
+kms_cursor_crc@cursor-onscreen-256x85
+kms_cursor_crc@cursor-offscreen-256x85
+kms_cursor_crc@cursor-sliding-256x85
+kms_cursor_crc@cursor-random-256x85
+kms_cursor_crc@cursor-rapid-movement-256x85
+kms_cursor_crc@cursor-onscreen-512x512
+kms_cursor_crc@cursor-offscreen-512x512
+kms_cursor_crc@cursor-sliding-512x512
+kms_cursor_crc@cursor-random-512x512
+kms_cursor_crc@cursor-rapid-movement-512x512
+kms_cursor_crc@cursor-onscreen-512x170
+kms_cursor_crc@cursor-offscreen-512x170
+kms_cursor_crc@cursor-sliding-512x170
+kms_cursor_crc@cursor-random-512x170
+kms_cursor_crc@cursor-rapid-movement-512x170
+kms_cursor_crc@cursor-onscreen-max-size
+kms_cursor_crc@cursor-offscreen-max-size
+kms_cursor_crc@cursor-sliding-max-size
+kms_cursor_crc@cursor-random-max-size
+kms_cursor_crc@cursor-rapid-movement-max-size
+kms_cursor_legacy@single-bo
+kms_cursor_legacy@single-move
+kms_cursor_legacy@forked-bo
+kms_cursor_legacy@forked-move
+kms_cursor_legacy@torture-bo
+kms_cursor_legacy@torture-move
+kms_cursor_legacy@nonblocking-modeset-vs-cursor-atomic
+kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic
+kms_cursor_legacy@2x-flip-vs-cursor-legacy
+kms_cursor_legacy@2x-flip-vs-cursor-atomic
+kms_cursor_legacy@2x-long-flip-vs-cursor-legacy
+kms_cursor_legacy@2x-long-flip-vs-cursor-atomic
+kms_cursor_legacy@2x-nonblocking-modeset-vs-cursor-atomic
+kms_cursor_legacy@2x-long-nonblocking-modeset-vs-cursor-atomic
+kms_cursor_legacy@2x-cursor-vs-flip-legacy
+kms_cursor_legacy@2x-long-cursor-vs-flip-legacy
+kms_cursor_legacy@2x-cursor-vs-flip-atomic
+kms_cursor_legacy@2x-long-cursor-vs-flip-atomic
+kms_cursor_legacy@flip-vs-cursor-crc-legacy
+kms_cursor_legacy@flip-vs-cursor-crc-atomic
+kms_cursor_legacy@flip-vs-cursor-busy-crc-legacy
+kms_cursor_legacy@flip-vs-cursor-busy-crc-atomic
+kms_cursor_legacy@basic-flip-before-cursor-legacy
+kms_cursor_legacy@basic-busy-flip-before-cursor-legacy
+kms_cursor_legacy@basic-flip-after-cursor-legacy
+kms_cursor_legacy@basic-flip-before-cursor-varying-size
+kms_cursor_legacy@basic-busy-flip-before-cursor-varying-size
+kms_cursor_legacy@basic-flip-after-cursor-varying-size
+kms_cursor_legacy@short-flip-before-cursor-toggle
+kms_cursor_legacy@short-busy-flip-before-cursor-toggle
+kms_cursor_legacy@short-flip-after-cursor-toggle
+kms_cursor_legacy@basic-flip-before-cursor-atomic
+kms_cursor_legacy@basic-busy-flip-before-cursor-atomic
+kms_cursor_legacy@basic-flip-after-cursor-atomic
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
+kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
+kms_cursor_legacy@short-busy-flip-before-cursor-atomic-transitions-varying-size
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
+kms_cursor_legacy@cursor-vs-flip-legacy
+kms_cursor_legacy@flip-vs-cursor-legacy
+kms_cursor_legacy@cursorA-vs-flipA-legacy
+kms_cursor_legacy@cursorA-vs-flipB-legacy
+kms_cursor_legacy@cursorB-vs-flipA-legacy
+kms_cursor_legacy@cursorB-vs-flipB-legacy
+kms_cursor_legacy@cursor-vs-flip-varying-size
+kms_cursor_legacy@flip-vs-cursor-varying-size
+kms_cursor_legacy@cursorA-vs-flipA-varying-size
+kms_cursor_legacy@cursorA-vs-flipB-varying-size
+kms_cursor_legacy@cursorB-vs-flipA-varying-size
+kms_cursor_legacy@cursorB-vs-flipB-varying-size
+kms_cursor_legacy@cursor-vs-flip-toggle
+kms_cursor_legacy@flip-vs-cursor-toggle
+kms_cursor_legacy@cursorA-vs-flipA-toggle
+kms_cursor_legacy@cursorA-vs-flipB-toggle
+kms_cursor_legacy@cursorB-vs-flipA-toggle
+kms_cursor_legacy@cursorB-vs-flipB-toggle
+kms_cursor_legacy@cursor-vs-flip-atomic
+kms_cursor_legacy@flip-vs-cursor-atomic
+kms_cursor_legacy@cursorA-vs-flipA-atomic
+kms_cursor_legacy@cursorA-vs-flipB-atomic
+kms_cursor_legacy@cursorB-vs-flipA-atomic
+kms_cursor_legacy@cursorB-vs-flipB-atomic
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions
+kms_cursor_legacy@flip-vs-cursor-atomic-transitions
+kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions
+kms_cursor_legacy@cursorA-vs-flipB-atomic-transitions
+kms_cursor_legacy@cursorB-vs-flipA-atomic-transitions
+kms_cursor_legacy@cursorB-vs-flipB-atomic-transitions
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size
+kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size
+kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions-varying-size
+kms_cursor_legacy@cursorA-vs-flipB-atomic-transitions-varying-size
+kms_cursor_legacy@cursorB-vs-flipA-atomic-transitions-varying-size
+kms_cursor_legacy@cursorB-vs-flipB-atomic-transitions-varying-size
+kms_dither@FB-8BPC-Vs-Panel-6BPC
+kms_dither@FB-8BPC-Vs-Panel-8BPC
+kms_dp_aux_dev
+kms_tiled_display@basic-test-pattern
+kms_tiled_display@basic-test-pattern-with-chamelium
+kms_draw_crc@draw-method-mmap-cpu
+kms_draw_crc@draw-method-mmap-gtt
+kms_draw_crc@draw-method-mmap-wc
+kms_draw_crc@draw-method-pwrite
+kms_draw_crc@draw-method-blt
+kms_draw_crc@draw-method-render
+kms_draw_crc@fill-fb
+kms_dsc@dsc-basic
+kms_dsc@dsc-with-formats
+kms_dsc@dsc-with-bpc
+kms_dsc@dsc-with-bpc-formats
+kms_dsc@dsc-with-output-formats
+kms_fbcon_fbt@fbc
+kms_fbcon_fbt@psr
+kms_fbcon_fbt@fbc-suspend
+kms_fbcon_fbt@psr-suspend
+kms_fence_pin_leak
+kms_flip@nonblocking-read
+kms_flip@wf_vblank-ts-check
+kms_flip@2x-wf_vblank-ts-check
+kms_flip@blocking-wf_vblank
+kms_flip@2x-blocking-wf_vblank
+kms_flip@absolute-wf_vblank
+kms_flip@2x-absolute-wf_vblank
+kms_flip@blocking-absolute-wf_vblank
+kms_flip@2x-blocking-absolute-wf_vblank
+kms_flip@basic-plain-flip
+kms_flip@2x-plain-flip
+kms_flip@busy-flip
+kms_flip@2x-busy-flip
+kms_flip@flip-vs-fences
+kms_flip@2x-flip-vs-fences
+kms_flip@plain-flip-ts-check
+kms_flip@2x-plain-flip-ts-check
+kms_flip@plain-flip-fb-recreate
+kms_flip@2x-plain-flip-fb-recreate
+kms_flip@flip-vs-rmfb
+kms_flip@2x-flip-vs-rmfb
+kms_flip@basic-flip-vs-dpms
+kms_flip@2x-flip-vs-dpms
+kms_flip@flip-vs-panning
+kms_flip@2x-flip-vs-panning
+kms_flip@basic-flip-vs-modeset
+kms_flip@2x-flip-vs-modeset
+kms_flip@flip-vs-expired-vblank
+kms_flip@2x-flip-vs-expired-vblank
+kms_flip@flip-vs-absolute-wf_vblank
+kms_flip@2x-flip-vs-absolute-wf_vblank
+kms_flip@basic-flip-vs-wf_vblank
+kms_flip@2x-flip-vs-wf_vblank
+kms_flip@flip-vs-blocking-wf-vblank
+kms_flip@2x-flip-vs-blocking-wf-vblank
+kms_flip@flip-vs-modeset-vs-hang
+kms_flip@2x-flip-vs-modeset-vs-hang
+kms_flip@flip-vs-panning-vs-hang
+kms_flip@2x-flip-vs-panning-vs-hang
+kms_flip@flip-vs-dpms-off-vs-modeset
+kms_flip@2x-flip-vs-dpms-off-vs-modeset
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset
+kms_flip@2x-single-buffer-flip-vs-dpms-off-vs-modeset
+kms_flip@dpms-off-confusion
+kms_flip@nonexisting-fb
+kms_flip@2x-nonexisting-fb
+kms_flip@dpms-vs-vblank-race
+kms_flip@2x-dpms-vs-vblank-race
+kms_flip@modeset-vs-vblank-race
+kms_flip@2x-modeset-vs-vblank-race
+kms_flip@bo-too-big
+kms_flip@flip-vs-suspend
+kms_flip@2x-flip-vs-suspend
+kms_flip@wf_vblank-ts-check-interruptible
+kms_flip@2x-wf_vblank-ts-check-interruptible
+kms_flip@absolute-wf_vblank-interruptible
+kms_flip@2x-absolute-wf_vblank-interruptible
+kms_flip@blocking-absolute-wf_vblank-interruptible
+kms_flip@2x-blocking-absolute-wf_vblank-interruptible
+kms_flip@plain-flip-interruptible
+kms_flip@2x-plain-flip-interruptible
+kms_flip@flip-vs-fences-interruptible
+kms_flip@2x-flip-vs-fences-interruptible
+kms_flip@plain-flip-ts-check-interruptible
+kms_flip@2x-plain-flip-ts-check-interruptible
+kms_flip@plain-flip-fb-recreate-interruptible
+kms_flip@2x-plain-flip-fb-recreate-interruptible
+kms_flip@flip-vs-rmfb-interruptible
+kms_flip@2x-flip-vs-rmfb-interruptible
+kms_flip@flip-vs-panning-interruptible
+kms_flip@2x-flip-vs-panning-interruptible
+kms_flip@flip-vs-expired-vblank-interruptible
+kms_flip@2x-flip-vs-expired-vblank-interruptible
+kms_flip@flip-vs-absolute-wf_vblank-interruptible
+kms_flip@2x-flip-vs-absolute-wf_vblank-interruptible
+kms_flip@flip-vs-wf_vblank-interruptible
+kms_flip@2x-flip-vs-wf_vblank-interruptible
+kms_flip@flip-vs-dpms-off-vs-modeset-interruptible
+kms_flip@2x-flip-vs-dpms-off-vs-modeset-interruptible
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible
+kms_flip@2x-single-buffer-flip-vs-dpms-off-vs-modeset-interruptible
+kms_flip@dpms-off-confusion-interruptible
+kms_flip@nonexisting-fb-interruptible
+kms_flip@2x-nonexisting-fb-interruptible
+kms_flip@dpms-vs-vblank-race-interruptible
+kms_flip@2x-dpms-vs-vblank-race-interruptible
+kms_flip@modeset-vs-vblank-race-interruptible
+kms_flip@2x-modeset-vs-vblank-race-interruptible
+kms_flip@bo-too-big-interruptible
+kms_flip@flip-vs-suspend-interruptible
+kms_flip@2x-flip-vs-suspend-interruptible
+kms_flip_event_leak@basic
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-downscaling
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling
+kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-downscaling
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-yftile-to-32bpp-yftile-downscaling
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tile-downscaling
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-downscaling
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-16bpp-4tile-downscaling
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-yftileccs-to-64bpp-yftile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytilegen12rcccs-downscaling
+kms_flip_scaled_crc@flip-32bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-downscaling
+kms_flip_scaled_crc@flip-32bpp-yftile-to-32bpp-yftileccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-yftile-to-64bpp-yftile-upscaling
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling
+kms_flip_scaled_crc@flip-32bpp-4tile-to-64bpp-4tile-upscaling
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-yftile-to-32bpp-yftile-upscaling
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tile-upscaling
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-yftile-to-16bpp-yftile-upscaling
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-16bpp-4tile-upscaling
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-yftileccs-to-64bpp-yftile-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling
+kms_flip_scaled_crc@flip-32bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-upscaling
+kms_flip_scaled_crc@flip-32bpp-yftile-to-32bpp-yftileccs-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling
+kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling
+kms_force_connector_basic@force-load-detect
+kms_force_connector_basic@force-connector-state
+kms_force_connector_basic@force-edid
+kms_force_connector_basic@prune-stale-modes
+kms_frontbuffer_tracking@fbc-1p-rte
+kms_frontbuffer_tracking@fbc-2p-rte
+kms_frontbuffer_tracking@psr-1p-rte
+kms_frontbuffer_tracking@psr-2p-rte
+kms_frontbuffer_tracking@fbcpsr-1p-rte
+kms_frontbuffer_tracking@fbcpsr-2p-rte
+kms_frontbuffer_tracking@drrs-1p-rte
+kms_frontbuffer_tracking@drrs-2p-rte
+kms_frontbuffer_tracking@fbcdrrs-1p-rte
+kms_frontbuffer_tracking@fbcdrrs-2p-rte
+kms_frontbuffer_tracking@psrdrrs-1p-rte
+kms_frontbuffer_tracking@psrdrrs-2p-rte
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-rte
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-rte
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@drrs-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-offscren-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-pri-shrfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-draw-render
+kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbc-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psr-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psr-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psr-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@drrs-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@drrs-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@drrs-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-indfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-indfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-indfb-plflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-shrfb-pgflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-shrfb-msflip-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-shrfb-plflip-blt
+kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbc-2p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-1p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-2p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-1p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-2p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-indfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-shrfb-fliptrack-mmap-gtt
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@psr-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@psr-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@psr-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@drrs-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@drrs-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@drrs-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-cur-indfb-onoff
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-move
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-onoff
+kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psr-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psr-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsr-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@drrs-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@drrs-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@drrs-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcdrrs-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcdrrs-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcdrrs-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psrdrrs-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psrdrrs-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@psrdrrs-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-primscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-scndscrn-spr-indfb-fullscreen
+kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbc-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@psr-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@psr-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcpsr-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcpsr-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@drrs-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@drrs-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcdrrs-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcdrrs-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@psrdrrs-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@psrdrrs-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcpsrdrrs-1p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbcpsrdrrs-2p-pri-indfb-multidraw
+kms_frontbuffer_tracking@fbc-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@psr-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@drrs-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-farfromfence-mmap-gtt
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite
+kms_frontbuffer_tracking@fbc-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@fbc-rgb565-draw-blt
+kms_frontbuffer_tracking@fbc-rgb101010-draw-blt
+kms_frontbuffer_tracking@fbc-rgb565-draw-render
+kms_frontbuffer_tracking@fbc-rgb101010-draw-render
+kms_frontbuffer_tracking@psr-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@psr-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@psr-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@psr-rgb565-draw-pwrite
+kms_frontbuffer_tracking@psr-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@psr-rgb565-draw-blt
+kms_frontbuffer_tracking@psr-rgb101010-draw-blt
+kms_frontbuffer_tracking@psr-rgb565-draw-render
+kms_frontbuffer_tracking@psr-rgb101010-draw-render
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-blt
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-blt
+kms_frontbuffer_tracking@fbcpsr-rgb565-draw-render
+kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-render
+kms_frontbuffer_tracking@drrs-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@drrs-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@drrs-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@drrs-rgb565-draw-pwrite
+kms_frontbuffer_tracking@drrs-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@drrs-rgb565-draw-blt
+kms_frontbuffer_tracking@drrs-rgb101010-draw-blt
+kms_frontbuffer_tracking@drrs-rgb565-draw-render
+kms_frontbuffer_tracking@drrs-rgb101010-draw-render
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-blt
+kms_frontbuffer_tracking@fbcdrrs-rgb565-draw-render
+kms_frontbuffer_tracking@fbcdrrs-rgb101010-draw-render
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-blt
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-blt
+kms_frontbuffer_tracking@psrdrrs-rgb565-draw-render
+kms_frontbuffer_tracking@psrdrrs-rgb101010-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-mmap-cpu
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-mmap-gtt
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-mmap-wc
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-pwrite
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-blt
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb565-draw-render
+kms_frontbuffer_tracking@fbcpsrdrrs-rgb101010-draw-render
+kms_frontbuffer_tracking@fbc-indfb-scaledprimary
+kms_frontbuffer_tracking@fbc-shrfb-scaledprimary
+kms_frontbuffer_tracking@psr-indfb-scaledprimary
+kms_frontbuffer_tracking@psr-shrfb-scaledprimary
+kms_frontbuffer_tracking@fbcpsr-indfb-scaledprimary
+kms_frontbuffer_tracking@fbcpsr-shrfb-scaledprimary
+kms_frontbuffer_tracking@drrs-indfb-scaledprimary
+kms_frontbuffer_tracking@drrs-shrfb-scaledprimary
+kms_frontbuffer_tracking@fbcdrrs-indfb-scaledprimary
+kms_frontbuffer_tracking@fbcdrrs-shrfb-scaledprimary
+kms_frontbuffer_tracking@psrdrrs-indfb-scaledprimary
+kms_frontbuffer_tracking@psrdrrs-shrfb-scaledprimary
+kms_frontbuffer_tracking@fbcpsrdrrs-indfb-scaledprimary
+kms_frontbuffer_tracking@fbcpsrdrrs-shrfb-scaledprimary
+kms_frontbuffer_tracking@fbc-modesetfrombusy
+kms_frontbuffer_tracking@fbc-badstride
+kms_frontbuffer_tracking@fbc-stridechange
+kms_frontbuffer_tracking@fbc-tiling-linear
+kms_frontbuffer_tracking@fbc-tiling-y
+kms_frontbuffer_tracking@fbc-tiling-4
+kms_frontbuffer_tracking@fbc-suspend
+kms_frontbuffer_tracking@psr-modesetfrombusy
+kms_frontbuffer_tracking@psr-slowdraw
+kms_frontbuffer_tracking@psr-suspend
+kms_frontbuffer_tracking@fbcpsr-modesetfrombusy
+kms_frontbuffer_tracking@fbcpsr-badstride
+kms_frontbuffer_tracking@fbcpsr-stridechange
+kms_frontbuffer_tracking@fbcpsr-tiling-linear
+kms_frontbuffer_tracking@fbcpsr-tiling-y
+kms_frontbuffer_tracking@fbcpsr-tiling-4
+kms_frontbuffer_tracking@fbcpsr-slowdraw
+kms_frontbuffer_tracking@fbcpsr-suspend
+kms_frontbuffer_tracking@drrs-modesetfrombusy
+kms_frontbuffer_tracking@drrs-slowdraw
+kms_frontbuffer_tracking@drrs-suspend
+kms_frontbuffer_tracking@fbcdrrs-modesetfrombusy
+kms_frontbuffer_tracking@fbcdrrs-badstride
+kms_frontbuffer_tracking@fbcdrrs-stridechange
+kms_frontbuffer_tracking@fbcdrrs-tiling-linear
+kms_frontbuffer_tracking@fbcdrrs-tiling-y
+kms_frontbuffer_tracking@fbcdrrs-tiling-4
+kms_frontbuffer_tracking@fbcdrrs-slowdraw
+kms_frontbuffer_tracking@fbcdrrs-suspend
+kms_frontbuffer_tracking@psrdrrs-modesetfrombusy
+kms_frontbuffer_tracking@psrdrrs-slowdraw
+kms_frontbuffer_tracking@psrdrrs-suspend
+kms_frontbuffer_tracking@fbcpsrdrrs-modesetfrombusy
+kms_frontbuffer_tracking@fbcpsrdrrs-badstride
+kms_frontbuffer_tracking@fbcpsrdrrs-stridechange
+kms_frontbuffer_tracking@fbcpsrdrrs-tiling-linear
+kms_frontbuffer_tracking@fbcpsrdrrs-tiling-y
+kms_frontbuffer_tracking@fbcpsrdrrs-tiling-4
+kms_frontbuffer_tracking@fbcpsrdrrs-slowdraw
+kms_frontbuffer_tracking@fbcpsrdrrs-suspend
+kms_frontbuffer_tracking@basic
+kms_getfb@getfb-handle-zero
+kms_getfb@getfb-handle-valid
+kms_getfb@getfb-handle-closed
+kms_getfb@getfb-handle-not-fb
+kms_getfb@getfb-addfb-different-handles
+kms_getfb@getfb-repeated-different-handles
+kms_getfb@getfb-reject-ccs
+kms_getfb@getfb2-handle-zero
+kms_getfb@getfb2-handle-closed
+kms_getfb@getfb2-handle-not-fb
+kms_getfb@getfb2-accept-ccs
+kms_getfb@getfb2-into-addfb2
+kms_getfb@getfb-handle-protection
+kms_getfb@getfb2-handle-protection
+kms_hdmi_inject@inject-4k
+kms_hdmi_inject@inject-audio
+kms_hdr@bpc-switch
+kms_hdr@bpc-switch-dpms
+kms_hdr@bpc-switch-suspend
+kms_hdr@static-toggle
+kms_hdr@static-toggle-dpms
+kms_hdr@static-toggle-suspend
+kms_hdr@static-swap
+kms_hdr@invalid-metadata-sizes
+kms_hdr@invalid-hdr
+kms_invalid_mode@clock-too-high
+kms_invalid_mode@zero-clock
+kms_invalid_mode@int-max-clock
+kms_invalid_mode@uint-max-clock
+kms_invalid_mode@zero-hdisplay
+kms_invalid_mode@zero-vdisplay
+kms_invalid_mode@bad-hsync-start
+kms_invalid_mode@bad-vsync-start
+kms_invalid_mode@bad-hsync-end
+kms_invalid_mode@bad-vsync-end
+kms_invalid_mode@bad-htotal
+kms_invalid_mode@bad-vtotal
+kms_legacy_colorkey@basic
+kms_legacy_colorkey@invalid-plane
+kms_multipipe_modeset@basic-max-pipe-crc-check
+kms_panel_fitting@legacy
+kms_panel_fitting@atomic-fastset
+kms_pipe_b_c_ivb@pipe-B-dpms-off-modeset-pipe-C
+kms_pipe_b_c_ivb@pipe-B-double-modeset-then-modeset-pipe-C
+kms_pipe_b_c_ivb@disable-pipe-B-enable-pipe-C
+kms_pipe_b_c_ivb@from-pipe-C-to-B-with-3-lanes
+kms_pipe_b_c_ivb@enable-pipe-C-while-B-has-3-lanes
+kms_pipe_crc_basic@bad-source
+kms_pipe_crc_basic@read-crc
+kms_pipe_crc_basic@read-crc-frame-sequence
+kms_pipe_crc_basic@nonblocking-crc
+kms_pipe_crc_basic@nonblocking-crc-frame-sequence
+kms_pipe_crc_basic@suspend-read-crc
+kms_pipe_crc_basic@hang-read-crc
+kms_pipe_crc_basic@disable-crc-after-crtc
+kms_pipe_crc_basic@compare-crc-sanitycheck-xr24
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12
+kms_plane@pixel-format
+kms_plane@pixel-format-source-clamping
+kms_plane@plane-position-covered
+kms_plane@plane-position-hole
+kms_plane@plane-position-hole-dpms
+kms_plane@plane-panning-top-left
+kms_plane@plane-panning-bottom-right
+kms_plane@plane-panning-bottom-right-suspend
+kms_plane@invalid-pixel-format-settings
+kms_plane_alpha_blend@alpha-basic
+kms_plane_alpha_blend@alpha-7efc
+kms_plane_alpha_blend@coverage-7efc
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant
+kms_plane_alpha_blend@alpha-transparent-fb
+kms_plane_alpha_blend@alpha-opaque-fb
+kms_plane_alpha_blend@constant-alpha-min
+kms_plane_alpha_blend@constant-alpha-mid
+kms_plane_alpha_blend@constant-alpha-max
+kms_plane_cursor@primary
+kms_plane_cursor@overlay
+kms_plane_cursor@viewport
+kms_plane_lowres@tiling-none
+kms_plane_lowres@tiling-x
+kms_plane_lowres@tiling-y
+kms_plane_lowres@tiling-yf
+kms_plane_lowres@tiling-4
+kms_plane_multiple@tiling-none
+kms_plane_multiple@tiling-x
+kms_plane_multiple@tiling-y
+kms_plane_multiple@tiling-yf
+kms_plane_multiple@tiling-4
+kms_plane_scaling@plane-upscale-with-pixel-format-20x20
+kms_plane_scaling@plane-upscale-with-pixel-format-factor-0-25
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-25
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-5
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-75
+kms_plane_scaling@plane-scaler-with-pixel-format-unity-scaling
+kms_plane_scaling@plane-upscale-with-rotation-20x20
+kms_plane_scaling@plane-upscale-with-rotation-factor-0-25
+kms_plane_scaling@plane-downscale-with-rotation-factor-0-25
+kms_plane_scaling@plane-downscale-with-rotation-factor-0-5
+kms_plane_scaling@plane-downscale-with-rotation-factor-0-75
+kms_plane_scaling@plane-scaler-with-rotation-unity-scaling
+kms_plane_scaling@plane-upscale-with-modifiers-20x20
+kms_plane_scaling@plane-upscale-with-modifiers-factor-0-25
+kms_plane_scaling@plane-downscale-with-modifiers-factor-0-25
+kms_plane_scaling@plane-downscale-with-modifiers-factor-0-5
+kms_plane_scaling@plane-downscale-with-modifiers-factor-0-75
+kms_plane_scaling@plane-scaler-with-modifiers-unity-scaling
+kms_plane_scaling@plane-scaler-with-clipping-clamping-pixel-formats
+kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation
+kms_plane_scaling@plane-scaler-with-clipping-clamping-modifiers
+kms_plane_scaling@planes-upscale-20x20
+kms_plane_scaling@planes-upscale-factor-0-25
+kms_plane_scaling@planes-scaler-unity-scaling
+kms_plane_scaling@planes-downscale-factor-0-25
+kms_plane_scaling@planes-downscale-factor-0-5
+kms_plane_scaling@planes-downscale-factor-0-75
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-75
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-25
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-75
+kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25
+kms_plane_scaling@planes-unity-scaling-downscale-factor-0-5
+kms_plane_scaling@planes-unity-scaling-downscale-factor-0-75
+kms_plane_scaling@planes-downscale-factor-0-25-upscale-20x20
+kms_plane_scaling@planes-downscale-factor-0-25-upscale-factor-0-25
+kms_plane_scaling@planes-downscale-factor-0-25-unity-scaling
+kms_plane_scaling@planes-downscale-factor-0-5-upscale-20x20
+kms_plane_scaling@planes-downscale-factor-0-5-upscale-factor-0-25
+kms_plane_scaling@planes-downscale-factor-0-5-unity-scaling
+kms_plane_scaling@planes-downscale-factor-0-75-upscale-20x20
+kms_plane_scaling@planes-downscale-factor-0-75-upscale-factor-0-25
+kms_plane_scaling@planes-downscale-factor-0-75-unity-scaling
+kms_plane_scaling@intel-max-src-size
+kms_plane_scaling@invalid-num-scalers
+kms_plane_scaling@invalid-parameters
+kms_plane_scaling@2x-scaler-multi-pipe
+kms_prime@basic-crc-hybrid
+kms_prime@basic-modeset-hybrid
+kms_prime@D3hot
+kms_prime@basic-crc-vgem
+kms_prop_blob@basic
+kms_prop_blob@blob-prop-core
+kms_prop_blob@blob-prop-validate
+kms_prop_blob@blob-prop-lifetime
+kms_prop_blob@blob-multiple
+kms_prop_blob@invalid-get-prop-any
+kms_prop_blob@invalid-get-prop
+kms_prop_blob@invalid-set-prop-any
+kms_prop_blob@invalid-set-prop
+kms_properties@plane-properties-legacy
+kms_properties@plane-properties-atomic
+kms_properties@crtc-properties-legacy
+kms_properties@crtc-properties-atomic
+kms_properties@connector-properties-legacy
+kms_properties@connector-properties-atomic
+kms_properties@invalid-properties-legacy
+kms_properties@invalid-properties-atomic
+kms_properties@get_properties-sanity-atomic
+kms_properties@get_properties-sanity-non-atomic
+kms_psr@basic
+kms_psr@no_drrs
+kms_psr@primary_page_flip
+kms_psr@primary_mmap_gtt
+kms_psr@primary_mmap_cpu
+kms_psr@primary_blt
+kms_psr@primary_render
+kms_psr@sprite_mmap_gtt
+kms_psr@cursor_mmap_gtt
+kms_psr@sprite_mmap_cpu
+kms_psr@cursor_mmap_cpu
+kms_psr@sprite_blt
+kms_psr@cursor_blt
+kms_psr@sprite_render
+kms_psr@cursor_render
+kms_psr@sprite_plane_move
+kms_psr@cursor_plane_move
+kms_psr@sprite_plane_onoff
+kms_psr@cursor_plane_onoff
+kms_psr@dpms
+kms_psr@suspend
+kms_psr@psr2_basic
+kms_psr@psr2_no_drrs
+kms_psr@psr2_primary_page_flip
+kms_psr@psr2_primary_mmap_gtt
+kms_psr@psr2_primary_mmap_cpu
+kms_psr@psr2_primary_blt
+kms_psr@psr2_primary_render
+kms_psr@psr2_sprite_mmap_gtt
+kms_psr@psr2_cursor_mmap_gtt
+kms_psr@psr2_sprite_mmap_cpu
+kms_psr@psr2_cursor_mmap_cpu
+kms_psr@psr2_sprite_blt
+kms_psr@psr2_cursor_blt
+kms_psr@psr2_sprite_render
+kms_psr@psr2_cursor_render
+kms_psr@psr2_sprite_plane_move
+kms_psr@psr2_cursor_plane_move
+kms_psr@psr2_sprite_plane_onoff
+kms_psr@psr2_cursor_plane_onoff
+kms_psr@psr2_dpms
+kms_psr@psr2_suspend
+kms_psr2_sf@primary-plane-update-sf-dmg-area
+kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb
+kms_psr2_sf@overlay-plane-update-sf-dmg-area
+kms_psr2_sf@cursor-plane-update-sf
+kms_psr2_sf@cursor-plane-move-continuous-sf
+kms_psr2_sf@cursor-plane-move-continuous-exceed-sf
+kms_psr2_sf@cursor-plane-move-continuous-exceed-fully-sf
+kms_psr2_sf@plane-move-sf-dmg-area
+kms_psr2_sf@overlay-plane-move-continuous-sf
+kms_psr2_sf@overlay-plane-move-continuous-exceed-sf
+kms_psr2_sf@overlay-plane-move-continuous-exceed-fully-sf
+kms_psr2_sf@overlay-primary-update-sf-dmg-area
+kms_psr2_sf@overlay-plane-update-continuous-sf
+kms_psr2_su@page_flip-XRGB8888
+kms_psr2_su@page_flip-NV12
+kms_psr2_su@page_flip-P010
+kms_psr2_su@frontbuffer-XRGB8888
+kms_pwrite_crc
+kms_rmfb@rmfb-ioctl
+kms_rmfb@close-fd
+kms_rotation_crc@primary-rotation-90
+kms_rotation_crc@primary-rotation-180
+kms_rotation_crc@primary-rotation-270
+kms_rotation_crc@sprite-rotation-90
+kms_rotation_crc@sprite-rotation-180
+kms_rotation_crc@sprite-rotation-270
+kms_rotation_crc@cursor-rotation-180
+kms_rotation_crc@sprite-rotation-90-pos-100-0
+kms_rotation_crc@bad-pixel-format
+kms_rotation_crc@bad-tiling
+kms_rotation_crc@primary-x-tiled-reflect-x-0
+kms_rotation_crc@primary-x-tiled-reflect-x-180
+kms_rotation_crc@primary-y-tiled-reflect-x-0
+kms_rotation_crc@primary-y-tiled-reflect-x-90
+kms_rotation_crc@primary-y-tiled-reflect-x-180
+kms_rotation_crc@primary-y-tiled-reflect-x-270
+kms_rotation_crc@primary-yf-tiled-reflect-x-0
+kms_rotation_crc@primary-yf-tiled-reflect-x-90
+kms_rotation_crc@primary-yf-tiled-reflect-x-180
+kms_rotation_crc@primary-yf-tiled-reflect-x-270
+kms_rotation_crc@primary-4-tiled-reflect-x-0
+kms_rotation_crc@primary-4-tiled-reflect-x-180
+kms_rotation_crc@multiplane-rotation
+kms_rotation_crc@multiplane-rotation-cropping-top
+kms_rotation_crc@multiplane-rotation-cropping-bottom
+kms_rotation_crc@exhaust-fences
+kms_scaling_modes@scaling-mode-full
+kms_scaling_modes@scaling-mode-center
+kms_scaling_modes@scaling-mode-full-aspect
+kms_scaling_modes@scaling-mode-none
+kms_selftest@drm_cmdline
+kms_selftest@drm_damage
+kms_selftest@drm_dp_mst
+kms_selftest@drm_format_helper
+kms_selftest@drm_format
+kms_selftest@framebuffer
+kms_selftest@drm_plane
+kms_setmode@basic
+kms_setmode@basic-clone-single-crtc
+kms_setmode@invalid-clone-single-crtc
+kms_setmode@invalid-clone-exclusive-crtc
+kms_setmode@clone-exclusive-crtc
+kms_setmode@invalid-clone-single-crtc-stealing
+kms_sysfs_edid_timing
+kms_tv_load_detect@load-detect
+kms_universal_plane@universal-plane-pipe-A-functional
+kms_universal_plane@universal-plane-pipe-A-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-A
+kms_universal_plane@cursor-fb-leak-pipe-A
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-A
+kms_universal_plane@universal-plane-pipe-B-functional
+kms_universal_plane@universal-plane-pipe-B-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-B
+kms_universal_plane@cursor-fb-leak-pipe-B
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-B
+kms_universal_plane@universal-plane-pipe-C-functional
+kms_universal_plane@universal-plane-pipe-C-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-C
+kms_universal_plane@cursor-fb-leak-pipe-C
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-C
+kms_universal_plane@universal-plane-pipe-D-functional
+kms_universal_plane@universal-plane-pipe-D-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-D
+kms_universal_plane@cursor-fb-leak-pipe-D
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-D
+kms_universal_plane@universal-plane-pipe-E-functional
+kms_universal_plane@universal-plane-pipe-E-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-E
+kms_universal_plane@cursor-fb-leak-pipe-E
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-E
+kms_universal_plane@universal-plane-pipe-F-functional
+kms_universal_plane@universal-plane-pipe-F-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-F
+kms_universal_plane@cursor-fb-leak-pipe-F
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-F
+kms_universal_plane@universal-plane-pipe-G-functional
+kms_universal_plane@universal-plane-pipe-G-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-G
+kms_universal_plane@cursor-fb-leak-pipe-G
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-G
+kms_universal_plane@universal-plane-pipe-H-functional
+kms_universal_plane@universal-plane-pipe-H-sanity
+kms_universal_plane@disable-primary-vs-flip-pipe-H
+kms_universal_plane@cursor-fb-leak-pipe-H
+kms_universal_plane@universal-plane-pageflip-windowed-pipe-H
+kms_vblank@invalid
+kms_vblank@crtc-id
+kms_vblank@pipe-A-accuracy-idle
+kms_vblank@pipe-A-query-idle
+kms_vblank@pipe-A-query-idle-hang
+kms_vblank@pipe-A-query-forked
+kms_vblank@pipe-A-query-forked-hang
+kms_vblank@pipe-A-query-busy
+kms_vblank@pipe-A-query-busy-hang
+kms_vblank@pipe-A-query-forked-busy
+kms_vblank@pipe-A-query-forked-busy-hang
+kms_vblank@pipe-A-wait-idle
+kms_vblank@pipe-A-wait-idle-hang
+kms_vblank@pipe-A-wait-forked
+kms_vblank@pipe-A-wait-forked-hang
+kms_vblank@pipe-A-wait-busy
+kms_vblank@pipe-A-wait-busy-hang
+kms_vblank@pipe-A-wait-forked-busy
+kms_vblank@pipe-A-wait-forked-busy-hang
+kms_vblank@pipe-A-ts-continuation-idle
+kms_vblank@pipe-A-ts-continuation-idle-hang
+kms_vblank@pipe-A-ts-continuation-dpms-rpm
+kms_vblank@pipe-A-ts-continuation-dpms-suspend
+kms_vblank@pipe-A-ts-continuation-suspend
+kms_vblank@pipe-A-ts-continuation-modeset
+kms_vblank@pipe-A-ts-continuation-modeset-hang
+kms_vblank@pipe-A-ts-continuation-modeset-rpm
+kms_vblank@pipe-B-accuracy-idle
+kms_vblank@pipe-B-query-idle
+kms_vblank@pipe-B-query-idle-hang
+kms_vblank@pipe-B-query-forked
+kms_vblank@pipe-B-query-forked-hang
+kms_vblank@pipe-B-query-busy
+kms_vblank@pipe-B-query-busy-hang
+kms_vblank@pipe-B-query-forked-busy
+kms_vblank@pipe-B-query-forked-busy-hang
+kms_vblank@pipe-B-wait-idle
+kms_vblank@pipe-B-wait-idle-hang
+kms_vblank@pipe-B-wait-forked
+kms_vblank@pipe-B-wait-forked-hang
+kms_vblank@pipe-B-wait-busy
+kms_vblank@pipe-B-wait-busy-hang
+kms_vblank@pipe-B-wait-forked-busy
+kms_vblank@pipe-B-wait-forked-busy-hang
+kms_vblank@pipe-B-ts-continuation-idle
+kms_vblank@pipe-B-ts-continuation-idle-hang
+kms_vblank@pipe-B-ts-continuation-dpms-rpm
+kms_vblank@pipe-B-ts-continuation-dpms-suspend
+kms_vblank@pipe-B-ts-continuation-suspend
+kms_vblank@pipe-B-ts-continuation-modeset
+kms_vblank@pipe-B-ts-continuation-modeset-hang
+kms_vblank@pipe-B-ts-continuation-modeset-rpm
+kms_vblank@pipe-C-accuracy-idle
+kms_vblank@pipe-C-query-idle
+kms_vblank@pipe-C-query-idle-hang
+kms_vblank@pipe-C-query-forked
+kms_vblank@pipe-C-query-forked-hang
+kms_vblank@pipe-C-query-busy
+kms_vblank@pipe-C-query-busy-hang
+kms_vblank@pipe-C-query-forked-busy
+kms_vblank@pipe-C-query-forked-busy-hang
+kms_vblank@pipe-C-wait-idle
+kms_vblank@pipe-C-wait-idle-hang
+kms_vblank@pipe-C-wait-forked
+kms_vblank@pipe-C-wait-forked-hang
+kms_vblank@pipe-C-wait-busy
+kms_vblank@pipe-C-wait-busy-hang
+kms_vblank@pipe-C-wait-forked-busy
+kms_vblank@pipe-C-wait-forked-busy-hang
+kms_vblank@pipe-C-ts-continuation-idle
+kms_vblank@pipe-C-ts-continuation-idle-hang
+kms_vblank@pipe-C-ts-continuation-dpms-rpm
+kms_vblank@pipe-C-ts-continuation-dpms-suspend
+kms_vblank@pipe-C-ts-continuation-suspend
+kms_vblank@pipe-C-ts-continuation-modeset
+kms_vblank@pipe-C-ts-continuation-modeset-hang
+kms_vblank@pipe-C-ts-continuation-modeset-rpm
+kms_vblank@pipe-D-accuracy-idle
+kms_vblank@pipe-D-query-idle
+kms_vblank@pipe-D-query-idle-hang
+kms_vblank@pipe-D-query-forked
+kms_vblank@pipe-D-query-forked-hang
+kms_vblank@pipe-D-query-busy
+kms_vblank@pipe-D-query-busy-hang
+kms_vblank@pipe-D-query-forked-busy
+kms_vblank@pipe-D-query-forked-busy-hang
+kms_vblank@pipe-D-wait-idle
+kms_vblank@pipe-D-wait-idle-hang
+kms_vblank@pipe-D-wait-forked
+kms_vblank@pipe-D-wait-forked-hang
+kms_vblank@pipe-D-wait-busy
+kms_vblank@pipe-D-wait-busy-hang
+kms_vblank@pipe-D-wait-forked-busy
+kms_vblank@pipe-D-wait-forked-busy-hang
+kms_vblank@pipe-D-ts-continuation-idle
+kms_vblank@pipe-D-ts-continuation-idle-hang
+kms_vblank@pipe-D-ts-continuation-dpms-rpm
+kms_vblank@pipe-D-ts-continuation-dpms-suspend
+kms_vblank@pipe-D-ts-continuation-suspend
+kms_vblank@pipe-D-ts-continuation-modeset
+kms_vblank@pipe-D-ts-continuation-modeset-hang
+kms_vblank@pipe-D-ts-continuation-modeset-rpm
+kms_vblank@pipe-E-accuracy-idle
+kms_vblank@pipe-E-query-idle
+kms_vblank@pipe-E-query-idle-hang
+kms_vblank@pipe-E-query-forked
+kms_vblank@pipe-E-query-forked-hang
+kms_vblank@pipe-E-query-busy
+kms_vblank@pipe-E-query-busy-hang
+kms_vblank@pipe-E-query-forked-busy
+kms_vblank@pipe-E-query-forked-busy-hang
+kms_vblank@pipe-E-wait-idle
+kms_vblank@pipe-E-wait-idle-hang
+kms_vblank@pipe-E-wait-forked
+kms_vblank@pipe-E-wait-forked-hang
+kms_vblank@pipe-E-wait-busy
+kms_vblank@pipe-E-wait-busy-hang
+kms_vblank@pipe-E-wait-forked-busy
+kms_vblank@pipe-E-wait-forked-busy-hang
+kms_vblank@pipe-E-ts-continuation-idle
+kms_vblank@pipe-E-ts-continuation-idle-hang
+kms_vblank@pipe-E-ts-continuation-dpms-rpm
+kms_vblank@pipe-E-ts-continuation-dpms-suspend
+kms_vblank@pipe-E-ts-continuation-suspend
+kms_vblank@pipe-E-ts-continuation-modeset
+kms_vblank@pipe-E-ts-continuation-modeset-hang
+kms_vblank@pipe-E-ts-continuation-modeset-rpm
+kms_vblank@pipe-F-accuracy-idle
+kms_vblank@pipe-F-query-idle
+kms_vblank@pipe-F-query-idle-hang
+kms_vblank@pipe-F-query-forked
+kms_vblank@pipe-F-query-forked-hang
+kms_vblank@pipe-F-query-busy
+kms_vblank@pipe-F-query-busy-hang
+kms_vblank@pipe-F-query-forked-busy
+kms_vblank@pipe-F-query-forked-busy-hang
+kms_vblank@pipe-F-wait-idle
+kms_vblank@pipe-F-wait-idle-hang
+kms_vblank@pipe-F-wait-forked
+kms_vblank@pipe-F-wait-forked-hang
+kms_vblank@pipe-F-wait-busy
+kms_vblank@pipe-F-wait-busy-hang
+kms_vblank@pipe-F-wait-forked-busy
+kms_vblank@pipe-F-wait-forked-busy-hang
+kms_vblank@pipe-F-ts-continuation-idle
+kms_vblank@pipe-F-ts-continuation-idle-hang
+kms_vblank@pipe-F-ts-continuation-dpms-rpm
+kms_vblank@pipe-F-ts-continuation-dpms-suspend
+kms_vblank@pipe-F-ts-continuation-suspend
+kms_vblank@pipe-F-ts-continuation-modeset
+kms_vblank@pipe-F-ts-continuation-modeset-hang
+kms_vblank@pipe-F-ts-continuation-modeset-rpm
+kms_vblank@pipe-G-accuracy-idle
+kms_vblank@pipe-G-query-idle
+kms_vblank@pipe-G-query-idle-hang
+kms_vblank@pipe-G-query-forked
+kms_vblank@pipe-G-query-forked-hang
+kms_vblank@pipe-G-query-busy
+kms_vblank@pipe-G-query-busy-hang
+kms_vblank@pipe-G-query-forked-busy
+kms_vblank@pipe-G-query-forked-busy-hang
+kms_vblank@pipe-G-wait-idle
+kms_vblank@pipe-G-wait-idle-hang
+kms_vblank@pipe-G-wait-forked
+kms_vblank@pipe-G-wait-forked-hang
+kms_vblank@pipe-G-wait-busy
+kms_vblank@pipe-G-wait-busy-hang
+kms_vblank@pipe-G-wait-forked-busy
+kms_vblank@pipe-G-wait-forked-busy-hang
+kms_vblank@pipe-G-ts-continuation-idle
+kms_vblank@pipe-G-ts-continuation-idle-hang
+kms_vblank@pipe-G-ts-continuation-dpms-rpm
+kms_vblank@pipe-G-ts-continuation-dpms-suspend
+kms_vblank@pipe-G-ts-continuation-suspend
+kms_vblank@pipe-G-ts-continuation-modeset
+kms_vblank@pipe-G-ts-continuation-modeset-hang
+kms_vblank@pipe-G-ts-continuation-modeset-rpm
+kms_vblank@pipe-H-accuracy-idle
+kms_vblank@pipe-H-query-idle
+kms_vblank@pipe-H-query-idle-hang
+kms_vblank@pipe-H-query-forked
+kms_vblank@pipe-H-query-forked-hang
+kms_vblank@pipe-H-query-busy
+kms_vblank@pipe-H-query-busy-hang
+kms_vblank@pipe-H-query-forked-busy
+kms_vblank@pipe-H-query-forked-busy-hang
+kms_vblank@pipe-H-wait-idle
+kms_vblank@pipe-H-wait-idle-hang
+kms_vblank@pipe-H-wait-forked
+kms_vblank@pipe-H-wait-forked-hang
+kms_vblank@pipe-H-wait-busy
+kms_vblank@pipe-H-wait-busy-hang
+kms_vblank@pipe-H-wait-forked-busy
+kms_vblank@pipe-H-wait-forked-busy-hang
+kms_vblank@pipe-H-ts-continuation-idle
+kms_vblank@pipe-H-ts-continuation-idle-hang
+kms_vblank@pipe-H-ts-continuation-dpms-rpm
+kms_vblank@pipe-H-ts-continuation-dpms-suspend
+kms_vblank@pipe-H-ts-continuation-suspend
+kms_vblank@pipe-H-ts-continuation-modeset
+kms_vblank@pipe-H-ts-continuation-modeset-hang
+kms_vblank@pipe-H-ts-continuation-modeset-rpm
+kms_vrr@flip-basic
+kms_vrr@flip-dpms
+kms_vrr@flip-suspend
+kms_vrr@flipline
+kms_vrr@negative-basic
+kms_writeback@writeback-pixel-formats
+kms_writeback@writeback-invalid-parameters
+kms_writeback@writeback-fb-id
+kms_writeback@writeback-check-output
+prime_mmap_kms@buffer-sharing
diff --git a/drivers/gpu/drm/ci/x86_64.config b/drivers/gpu/drm/ci/x86_64.config
new file mode 100644
index 000000000000..1cbd49a5b23a
--- /dev/null
+++ b/drivers/gpu/drm/ci/x86_64.config
@@ -0,0 +1,111 @@
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_DEBUG_KERNEL=y
+
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_ZRAM_MEMORY_TRACKING=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM=y
+CONFIG_ZSMALLOC_STAT=y
+
+CONFIG_PWM=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_OF=y
+CONFIG_CROS_EC=y
+
+# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
+CONFIG_BLK_DEV_INITRD=n
+
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+
+CONFIG_DRM=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_PWM_CROS_EC=y
+CONFIG_BACKLIGHT_PWM=y
+
+# Strip out some stuff we don't need for graphics testing, to reduce
+# the build.
+CONFIG_CAN=n
+CONFIG_WIRELESS=n
+CONFIG_RFKILL=n
+CONFIG_WLAN=n
+
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR=y
+
+CONFIG_REGULATOR_VCTRL=y
+
+CONFIG_KASAN=n
+CONFIG_KASAN_INLINE=n
+CONFIG_STACKTRACE=n
+
+CONFIG_TMPFS=y
+
+CONFIG_PROVE_LOCKING=n
+CONFIG_DEBUG_LOCKDEP=n
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+
+CONFIG_DETECT_HUNG_TASK=y
+
+CONFIG_USB_USBNET=y
+CONFIG_NETDEVICES=y
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ETH=y
+
+CONFIG_FW_LOADER_COMPRESS=y
+
+# options for AMD devices
+CONFIG_X86_AMD_PLATFORM_DEVICE=y
+CONFIG_ACPI_VIDEO=y
+CONFIG_X86_AMD_FREQ_SENSITIVITY=y
+CONFIG_PINCTRL=y
+CONFIG_PINCTRL_AMD=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_AMDGPU_SI=y
+CONFIG_DRM_AMDGPU_USERPTR=y
+CONFIG_DRM_AMD_ACP=n
+CONFIG_ACPI_WMI=y
+CONFIG_MXM_WMI=y
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_SERIAL=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_CHROME_PLATFORMS=y
+CONFIG_KVM_AMD=m
+
+# options for Intel devices
+CONFIG_MFD_INTEL_LPSS_PCI=y
+CONFIG_KVM_INTEL=m
+
+# options for KVM guests
+CONFIG_FUSE_FS=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_KVM=y
+CONFIG_KVM_GUEST=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VIRTIO_FS=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_PARAVIRT=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_TUN=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_VHOST_VSOCK=m
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
new file mode 100644
index 000000000000..bd9392536e7c
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -0,0 +1,19 @@
+kms_addfb_basic@bad-pitch-65536,Fail
+kms_addfb_basic@bo-too-small,Fail
+kms_async_flips@invalid-async-flip,Fail
+kms_atomic@plane-immutable-zpos,Fail
+kms_atomic_transition@plane-toggle-modeset-transition,Fail
+kms_bw@linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_color@degamma,Fail
+kms_cursor_crc@cursor-size-change,Fail
+kms_cursor_crc@pipe-A-cursor-size-change,Fail
+kms_cursor_crc@pipe-B-cursor-size-change,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_hdr@bpc-switch,Fail
+kms_hdr@bpc-switch-dpms,Fail
+kms_plane_multiple@atomic-pipe-A-tiling-none,Fail
+kms_rmfb@close-fd,Fail
+kms_rotation_crc@primary-rotation-180,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
new file mode 100644
index 000000000000..f8defa0f9e67
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
@@ -0,0 +1,21 @@
+kms_addfb_basic@too-high
+kms_async_flips@alternate-sync-async-flip
+kms_async_flips@async-flip-with-page-flip-events
+kms_async_flips@crc
+kms_async_flips@test-cursor
+kms_async_flips@test-time-stamp
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+kms_atomic_transition@plane-all-transition
+kms_atomic_transition@plane-use-after-nonblocking-unbind
+kms_bw@linear-tiling-1-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_cursor_crc@pipe-A-cursor-alpha-opaque
+kms_cursor_crc@pipe-B-cursor-alpha-opaque
+kms_plane@pixel-format
+kms_plane_multiple@atomic-pipe-B-tiling-none
+kms_plane_scaling@downscale-with-rotation-factor-0-5
+kms_universal_plane@disable-primary-vs-flip-pipe-A
+kms_universal_plane@disable-primary-vs-flip-pipe-B
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
new file mode 100644
index 000000000000..e2c538a0f954
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -0,0 +1,2 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
new file mode 100644
index 000000000000..5f513c638beb
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -0,0 +1,17 @@
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
new file mode 100644
index 000000000000..d5000515a315
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
@@ -0,0 +1,32 @@
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-2560x1440p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling
+kms_plane_alpha_blend@pipe-A-alpha-basic
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-A-constant-alpha-max
+kms_plane_alpha_blend@pipe-B-alpha-basic
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-constant-alpha-max
+kms_plane_alpha_blend@pipe-C-alpha-basic
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-C-constant-alpha-max
+kms_sysfs_edid_timing
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
new file mode 100644
index 000000000000..fe55540a3f9a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -0,0 +1,4 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
new file mode 100644
index 000000000000..46397ce38d5a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -0,0 +1,58 @@
+kms_3d,Timeout
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-3840x2160p,Fail
+kms_color@ctm-0-25,Fail
+kms_color@ctm-0-50,Fail
+kms_color@ctm-0-75,Fail
+kms_color@ctm-max,Fail
+kms_color@ctm-negative,Fail
+kms_color@ctm-red-to-blue,Fail
+kms_color@ctm-signed,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
+kms_hdmi_inject@inject-4k,Timeout
+kms_plane@plane-position-hole,Timeout
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-A-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-B-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-C-constant-alpha-max,Fail
+kms_plane_multiple@tiling-y,Timeout
+kms_pwrite_crc,Timeout
+kms_sysfs_edid_timing,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
new file mode 100644
index 000000000000..331c5841bb41
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
@@ -0,0 +1 @@
+kms_frontbuffer_tracking@fbc-tiling-linear
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
new file mode 100644
index 000000000000..3430b215c06e
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -0,0 +1,6 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
+# This causes cascading issues
+kms_3d
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
new file mode 100644
index 000000000000..6139b410e767
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -0,0 +1,18 @@
+kms_color@ctm-0-25,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
new file mode 100644
index 000000000000..0514a7b3fdb0
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
@@ -0,0 +1,38 @@
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-2560x1440p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_draw_crc@draw-method-xrgb8888-render-xtiled
+kms_flip@flip-vs-suspend
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling
+kms_hdr@bpc-switch-suspend
+kms_plane_alpha_blend@constant-alpha-min
+kms_plane_alpha_blend@pipe-A-alpha-basic
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-A-constant-alpha-max
+kms_plane_alpha_blend@pipe-B-alpha-basic
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-constant-alpha-max
+kms_plane_alpha_blend@pipe-C-alpha-basic
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-C-constant-alpha-max
+kms_psr2_su@page_flip-NV12
+kms_psr2_su@page_flip-P010
+kms_setmode@basic
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
new file mode 100644
index 000000000000..6d3d7ddc377f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -0,0 +1,2 @@
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
new file mode 100644
index 000000000000..5bd432e78129
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -0,0 +1,19 @@
+kms_fbcon_fbt@fbc,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
new file mode 100644
index 000000000000..fc41d13a2d56
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
@@ -0,0 +1,41 @@
+kms_bw@linear-tiling-1-displays-3840x2160p
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-2560x1440p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-2560x1440p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_flip@blocking-wf_vblank
+kms_flip@wf_vblank-ts-check
+kms_flip@wf_vblank-ts-check-interruptible
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling
+kms_frontbuffer_tracking@fbc-tiling-linear
+kms_plane_alpha_blend@pipe-A-alpha-basic
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-A-constant-alpha-max
+kms_plane_alpha_blend@pipe-B-alpha-basic
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-constant-alpha-max
+kms_plane_alpha_blend@pipe-C-alpha-basic
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-C-constant-alpha-max
+kms_prop_blob@invalid-set-prop-any
+kms_rotation_crc@multiplane-rotation
+kms_rotation_crc@multiplane-rotation-cropping-bottom
+kms_rotation_crc@multiplane-rotation-cropping-top
+kms_setmode@basic
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
new file mode 100644
index 000000000000..4c7d00ce14bc
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -0,0 +1,5 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
new file mode 100644
index 000000000000..56ec021a7679
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -0,0 +1,25 @@
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-A-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-C-constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
new file mode 100644
index 000000000000..f3ba1c4c5d46
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
@@ -0,0 +1,26 @@
+kms_async_flips@crc
+kms_bw@linear-tiling-2-displays-1920x1080p
+kms_bw@linear-tiling-2-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_bw@linear-tiling-3-displays-2560x1440p
+kms_bw@linear-tiling-3-displays-3840x2160p
+kms_bw@linear-tiling-4-displays-1920x1080p
+kms_bw@linear-tiling-4-displays-3840x2160p
+kms_color@ctm-0-25
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling
+kms_plane_alpha_blend@pipe-A-alpha-basic
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-alpha-basic
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb
+kms_plane_alpha_blend@pipe-B-constant-alpha-max
+kms_plane_alpha_blend@pipe-C-alpha-basic
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb
+kms_sysfs_edid_timing
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
new file mode 100644
index 000000000000..4c7d00ce14bc
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -0,0 +1,5 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
new file mode 100644
index 000000000000..a6da5544e198
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -0,0 +1,37 @@
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-3840x2160p,Fail
+kms_bw@linear-tiling-5-displays-1920x1080p,Fail
+kms_bw@linear-tiling-5-displays-2560x1440p,Fail
+kms_bw@linear-tiling-5-displays-3840x2160p,Fail
+kms_color@ctm-0-25,Fail
+kms_flip@flip-vs-panning-vs-hang,Timeout
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
+kms_rotation_crc@bad-pixel-format,Fail
+kms_rotation_crc@multiplane-rotation,Fail
+kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
+kms_rotation_crc@multiplane-rotation-cropping-top,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-flakes.txt
new file mode 100644
index 000000000000..1cd910ee06df
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-flakes.txt
@@ -0,0 +1,5 @@
+kms_draw_crc@.*
+kms_flip@blocking-absolute-wf_vblank
+kms_flip@bo-too-big-interruptible
+kms_flip@busy-flip
+kms_flip@flip-vs-rmfb-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
new file mode 100644
index 000000000000..1d0621750b14
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -0,0 +1,11 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# GPU hangs, then the whole machine
+gem_eio.*
+
+# Whole machine hangs
+kms_flip@absolute-wf_vblank@a-edp1
+
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
new file mode 100644
index 000000000000..967327ddc1ac
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -0,0 +1,48 @@
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-3840x2160p,Fail
+kms_fbcon_fbt@fbc,Fail
+kms_fbcon_fbt@fbc-suspend,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-upscaling,Fail
+kms_frontbuffer_tracking@fbc-tiling-linear,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-A-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-A-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-A-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-B-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-B-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-C-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-C-alpha-transparent-fb,Fail
+kms_plane_alpha_blend@pipe-C-constant-alpha-max,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
new file mode 100644
index 000000000000..c33202e7e2a1
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
@@ -0,0 +1 @@
+kms_flip@flip-vs-suspend
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
new file mode 100644
index 000000000000..f3be0888a214
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -0,0 +1,2 @@
+# This is generating kernel oops with divide error
+kms_plane_scaling@invalid-parameters
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
new file mode 100644
index 000000000000..671916067dba
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -0,0 +1,29 @@
+kms_3d,Fail
+kms_addfb_basic@addfb25-bad-modifier,Fail
+kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_color@pipe-A-invalid-gamma-lut-sizes,Fail
+kms_color@pipe-B-invalid-gamma-lut-sizes,Fail
+kms_force_connector_basic@force-connector-state,Fail
+kms_force_connector_basic@force-edid,Fail
+kms_force_connector_basic@force-load-detect,Fail
+kms_force_connector_basic@prune-stale-modes,Fail
+kms_invalid_mode@int-max-clock,Fail
+kms_plane_scaling@planes-upscale-20x20,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-75,Fail
+kms_plane_scaling@upscale-with-modifier-20x20,Fail
+kms_plane_scaling@upscale-with-pixel-format-20x20,Fail
+kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_properties@get_properties-sanity-atomic,Fail
+kms_properties@plane-properties-atomic,Fail
+kms_properties@plane-properties-legacy,Fail
+kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
new file mode 100644
index 000000000000..6ff81d00e84e
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -0,0 +1,10 @@
+kms_addfb_basic@addfb25-bad-modifier,Fail
+kms_bw@linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_color@pipe-A-invalid-gamma-lut-sizes,Fail
+kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_rmfb@close-fd,Fail
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
new file mode 100644
index 000000000000..208890b79eb0
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -0,0 +1,14 @@
+core_setmaster_vs_auth
+kms_bw@linear-tiling-1-displays-1920x1080p
+kms_bw@linear-tiling-1-displays-3840x2160p
+kms_bw@linear-tiling-3-displays-1920x1080p
+kms_cursor_legacy@cursor-vs-flip-atomic
+kms_plane_scaling@invalid-num-scalers
+kms_plane_scaling@planes-upscale-20x20
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5
+kms_plane_scaling@upscale-with-modifier-20x20
+kms_plane_scaling@upscale-with-pixel-format-20x20
+kms_prop_blob@invalid-set-prop-any
+kms_properties@get_properties-sanity-atomic
+kms_properties@plane-properties-atomic
+kms_properties@plane-properties-legacy
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
new file mode 100644
index 000000000000..860e702091e2
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
@@ -0,0 +1,12 @@
+kms_3d,Fail
+kms_properties@connector-properties-atomic,Fail
+kms_properties@get_properties-sanity-atomic,Fail
+kms_properties@get_properties-sanity-non-atomic,Fail
+kms_properties@connector-properties-legacy,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_cursor_legacy@single-bo,Fail
+kms_cursor_legacy@single-move,Fail
+kms_cursor_legacy@torture-bo,Fail
+kms_cursor_legacy@torture-move,Fail
+kms_hdmi_inject@inject-4k,Fail
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-flakes.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-flakes.txt
new file mode 100644
index 000000000000..b63329d06767
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-flakes.txt
@@ -0,0 +1,4 @@
+kms_force_connector_basic@force-connector-state
+kms_force_connector_basic@force-edid
+kms_force_connector_basic@force-load-detect
+kms_force_connector_basic@prune-stale-modes
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
new file mode 100644
index 000000000000..9981682feab2
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -0,0 +1,15 @@
+kms_3d,Fail
+kms_addfb_basic@addfb25-bad-modifier,Fail
+kms_cursor_legacy@all-pipes-forked-bo,Fail
+kms_cursor_legacy@all-pipes-forked-move,Fail
+kms_cursor_legacy@all-pipes-single-bo,Fail
+kms_cursor_legacy@all-pipes-single-move,Fail
+kms_cursor_legacy@all-pipes-torture-bo,Fail
+kms_cursor_legacy@all-pipes-torture-move,Fail
+kms_cursor_legacy@pipe-A-forked-bo,Fail
+kms_cursor_legacy@pipe-A-forked-move,Fail
+kms_cursor_legacy@pipe-A-single-bo,Fail
+kms_cursor_legacy@pipe-A-single-move,Fail
+kms_cursor_legacy@pipe-A-torture-bo,Fail
+kms_cursor_legacy@pipe-A-torture-move,Fail
+kms_hdmi_inject@inject-4k,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-flakes.txt
new file mode 100644
index 000000000000..0e3b60d3fade
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-flakes.txt
@@ -0,0 +1,4 @@
+kms_force_connector_basic@force-connector-state
+kms_force_connector_basic@force-edid
+kms_force_connector_basic@force-load-detect
+kms_force_connector_basic@prune-stale-modes
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
new file mode 100644
index 000000000000..88a1fc0a3b0d
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
@@ -0,0 +1,2 @@
+kms_3d,Fail
+kms_addfb_basic@addfb25-bad-modifier,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
new file mode 100644
index 000000000000..0e3b60d3fade
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
@@ -0,0 +1,4 @@
+kms_force_connector_basic@force-connector-state
+kms_force_connector_basic@force-edid
+kms_force_connector_basic@force-load-detect
+kms_force_connector_basic@prune-stale-modes
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
new file mode 100644
index 000000000000..cd49c8ce2059
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -0,0 +1,2 @@
+# Whole machine hangs
+kms_cursor_legacy@all-pipes-torture-move
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt
new file mode 100644
index 000000000000..14adeba3b62d
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-fails.txt
@@ -0,0 +1,25 @@
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
+kms_cursor_legacy@cursorA-vs-flipA-atomic-transitions,Crash
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_plane@pixel-format,Fail
+kms_plane@pixel-format-source-clamping,Fail
+kms_plane@plane-position-covered,Fail
+kms_plane@plane-position-hole,Fail
+kms_plane@plane-position-hole-dpms,Fail
+kms_plane_alpha_blend@alpha-7efc,Fail
+kms_plane_alpha_blend@coverage-7efc,Fail
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
+kms_plane_alpha_blend@pipe-A-alpha-7efc,Fail
+kms_plane_alpha_blend@pipe-A-coverage-7efc,Fail
+kms_plane_alpha_blend@pipe-A-coverage-vs-premult-vs-constant,Fail
+kms_plane_alpha_blend@pipe-B-alpha-7efc,Fail
+kms_plane_alpha_blend@pipe-B-alpha-basic,Fail
+kms_plane_alpha_blend@pipe-B-alpha-opaque-fb,Fail
+kms_plane_alpha_blend@pipe-B-constant-alpha-max,Fail
+kms_plane_alpha_blend@pipe-B-constant-alpha-mid,Fail
+kms_plane_alpha_blend@pipe-B-coverage-7efc,Fail
+kms_plane_alpha_blend@pipe-B-coverage-vs-premult-vs-constant,Fail
+kms_rmfb@close-fd,Fail
+kms_universal_plane@disable-primary-vs-flip-pipe-b,Fail
+kms_universal_plane@universal-plane-pipe-B-sanity,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt
new file mode 100644
index 000000000000..636563d3e59a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-flakes.txt
@@ -0,0 +1,7 @@
+
+# Test sometimes ends up reading the CRC from the frame before the
+# cursor update; tbd whether this is a kernel CRC bug or a bug in
+# the test
+kms_cursor_crc@.*
+kms_plane_multiple@atomic-pipe-A-tiling-none
+kms_atomic_transition@modeset-transition-nonblocking-fencing,Fail
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt
new file mode 100644
index 000000000000..410e0eeb3161
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-skips.txt
@@ -0,0 +1,23 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Test incorrectly assumes that CTM support implies gamma/degamma
+# LUT support. None of the subtests handle the case of only having
+# CTM support
+kms_color.*
+
+# 4k@60 is not supported on this hw, but the driver doesn't handle it
+# too gracefully: https://gitlab.freedesktop.org/drm/msm/-/issues/15
+kms_bw@linear-tiling-.*-displays-3840x2160p
+
+# Until igt fix lands: https://patchwork.freedesktop.org/patch/493175/
+kms_bw@linear-tiling-2.*
+kms_bw@linear-tiling-3.*
+kms_bw@linear-tiling-4.*
+kms_bw@linear-tiling-5.*
+kms_bw@linear-tiling-6.*
+
+# igt fix posted: https://patchwork.freedesktop.org/patch/499926/
+# failure mode is flaky due to randomization, but it fails frequently
+# enough to be detected as a Crash or occasionally an UnexpectedPass.
+kms_plane_multiple@atomic-pipe-A-tiling-none
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
new file mode 100644
index 000000000000..09c0c623cd75
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -0,0 +1,68 @@
+kms_color@ctm-0-25,Fail
+kms_color@ctm-0-50,Fail
+kms_color@ctm-0-75,Fail
+kms_color@ctm-blue-to-red,Fail
+kms_color@ctm-green-to-red,Fail
+kms_color@ctm-negative,Fail
+kms_color@ctm-red-to-blue,Fail
+kms_color@ctm-signed,Fail
+kms_color@pipe-A-ctm-0-25,Fail
+kms_color@pipe-A-ctm-0-5,Fail
+kms_color@pipe-A-ctm-0-75,Fail
+kms_color@pipe-A-ctm-blue-to-red,Fail
+kms_color@pipe-A-ctm-green-to-red,Fail
+kms_color@pipe-A-ctm-max,Fail
+kms_color@pipe-A-ctm-negative,Fail
+kms_color@pipe-A-ctm-red-to-blue,Fail
+kms_color@pipe-A-legacy-gamma,Fail
+kms_cursor_legacy@basic-flip-after-cursor-atomic,Fail
+kms_cursor_legacy@basic-flip-after-cursor-legacy,Fail
+kms_cursor_legacy@basic-flip-after-cursor-varying-size,Fail
+kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
+kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
+kms_cursor_legacy@basic-flip-before-cursor-varying-size,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
+kms_cursor_legacy@cursorA-vs-flipA-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions,Fail
+kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size,Fail
+kms_cursor_legacy@short-flip-after-cursor-toggle,Fail
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Fail
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_plane@pixel-format,Fail
+kms_plane@pixel-format-source-clamping,Fail
+kms_plane_alpha_blend@alpha-7efc,Fail
+kms_plane_alpha_blend@coverage-7efc,Fail
+kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
+kms_plane_alpha_blend@pipe-A-alpha-7efc,Fail
+kms_plane_alpha_blend@pipe-A-coverage-7efc,Fail
+kms_plane_alpha_blend@pipe-A-coverage-vs-premult-vs-constant,Fail
+kms_plane_cursor@overlay,Fail
+kms_plane_cursor@pipe-A-overlay-size-128,Fail
+kms_plane_cursor@pipe-A-overlay-size-256,Fail
+kms_plane_cursor@pipe-A-overlay-size-64,Fail
+kms_plane_cursor@pipe-A-viewport-size-128,Fail
+kms_plane_cursor@pipe-A-viewport-size-256,Fail
+kms_plane_cursor@pipe-A-viewport-size-64,Fail
+kms_plane_cursor@viewport,Fail
+kms_plane_scaling@downscale-with-pixel-format-factor-0-25,Timeout
+kms_plane_scaling@downscale-with-pixel-format-factor-0-5,Timeout
+kms_plane_scaling@downscale-with-pixel-format-factor-0-75,Timeout
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-25,Timeout
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-5,Timeout
+kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-75,Timeout
+kms_plane_scaling@plane-scaler-with-clipping-clamping-pixel-formats,Timeout
+kms_plane_scaling@plane-scaler-with-pixel-format-unity-scaling,Timeout
+kms_plane_scaling@planes-downscale-factor-0-25,Fail
+kms_plane_scaling@scaler-with-clipping-clamping,Timeout
+kms_plane_scaling@scaler-with-pixel-format-unity-scaling,Timeout
+kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
new file mode 100644
index 000000000000..5b3aaab7ac3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -0,0 +1,11 @@
+
+
+# Test sometimes ends up reading the CRC from the frame before the
+# cursor update; tbd whether this is a kernel CRC bug or a bug in
+# the test
+kms_cursor_crc@.*
+kms_cursor_legacy@flip-vs-cursor-toggle
+kms_cursor_legacy@pipe-A-forked-bo
+kms_cursor_legacy@pipe-A-forked-move
+kms_cursor_legacy@short-flip-before-cursor-toggle
+kms_flip@dpms-vs-vblank-race-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
new file mode 100644
index 000000000000..42675f1c6d76
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -0,0 +1,2 @@
+# Hangs machine
+kms_bw.*
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
new file mode 100644
index 000000000000..2a1baa948e12
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -0,0 +1,48 @@
+kms_3d,Crash
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_force_connector_basic@force-load-detect,Fail
+kms_invalid_mode@int-max-clock,Crash
+kms_plane@pixel-format,Crash
+kms_plane@pixel-format-source-clamping,Crash
+kms_plane@plane-position-hole,Crash
+kms_plane@plane-position-hole-dpms,Crash
+kms_plane_cursor@overlay,Crash
+kms_plane_cursor@pipe-A-overlay-size-128,Fail
+kms_plane_cursor@pipe-A-overlay-size-256,Fail
+kms_plane_cursor@pipe-A-overlay-size-64,Fail
+kms_plane_cursor@pipe-A-primary-size-128,Fail
+kms_plane_cursor@pipe-A-primary-size-256,Fail
+kms_plane_cursor@pipe-A-primary-size-64,Fail
+kms_plane_cursor@pipe-A-viewport-size-128,Fail
+kms_plane_cursor@pipe-A-viewport-size-256,Fail
+kms_plane_cursor@pipe-A-viewport-size-64,Fail
+kms_plane_cursor@pipe-B-overlay-size-128,Fail
+kms_plane_cursor@pipe-B-overlay-size-256,Fail
+kms_plane_cursor@pipe-B-overlay-size-64,Fail
+kms_plane_cursor@pipe-B-primary-size-128,Fail
+kms_plane_cursor@pipe-B-primary-size-256,Fail
+kms_plane_cursor@pipe-B-primary-size-64,Fail
+kms_plane_cursor@pipe-B-viewport-size-128,Fail
+kms_plane_cursor@pipe-B-viewport-size-256,Fail
+kms_plane_cursor@pipe-B-viewport-size-64,Fail
+kms_plane_cursor@primary,Crash
+kms_plane_cursor@viewport,Crash
+kms_plane_lowres@tiling-none,Fail
+kms_plane_scaling@downscale-with-modifier-factor-0-25,Fail
+kms_plane_scaling@downscale-with-rotation-factor-0-25,Fail
+kms_plane_scaling@upscale-with-modifier-20x20,Fail
+kms_plane_scaling@upscale-with-modifier-factor-0-25,Fail
+kms_plane_scaling@upscale-with-pixel-format-20x20,Fail
+kms_plane_scaling@upscale-with-pixel-format-factor-0-25,Fail
+kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_prime@basic-crc,Fail
+kms_properties@connector-properties-atomic,Crash
+kms_properties@connector-properties-legacy,Crash
+kms_properties@get_properties-sanity-atomic,Crash
+kms_properties@get_properties-sanity-non-atomic,Crash
+kms_setmode@invalid-clone-single-crtc,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
new file mode 100644
index 000000000000..45c54c75c899
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
@@ -0,0 +1,9 @@
+kms_addfb_basic@addfb25-bad-modifier
+kms_cursor_crc@.*
+kms_flip@basic-flip-vs-wf_vblank
+kms_invalid_mode@int-max-clock,Crash
+kms_pipe_crc_basic@.*
+kms_properties@connector-properties-atomic,Crash
+kms_properties@get_properties-sanity-atomic,Crash
+kms_properties@get_properties-sanity-non-atomic,Crash
+kms_rmfb@close-fd
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
new file mode 100644
index 000000000000..f20c3574b75a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -0,0 +1,52 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Too unstable, machine ends up hanging after lots of Oopses
+kms_cursor_legacy.*
+
+# Started hanging the machine on Linux 5.19-rc2:
+#
+# [IGT] kms_plane_lowres: executing
+# [IGT] kms_plane_lowres: starting subtest pipe-F-tiling-y
+# [IGT] kms_plane_lowres: exiting, ret=77
+# Console: switching to colour frame buffer device 170x48
+# rockchip-drm display-subsystem: [drm] *ERROR* flip_done timed out
+# rockchip-drm display-subsystem: [drm] *ERROR* [CRTC:35:crtc-0] commit wait timed out
+# BUG: spinlock bad magic on CPU#3, kms_plane_lowre/482
+# 8<--- cut here ---
+# Unable to handle kernel paging request at virtual address 7812078e
+# [7812078e] *pgd=00000000
+# Internal error: Oops: 5 [#1] SMP ARM
+# Modules linked in:
+# CPU: 3 PID: 482 Comm: kms_plane_lowre Tainted: G W 5.19.0-rc2-323596-g00535de92171 #1
+# Hardware name: Rockchip (Device Tree)
+# Process kms_plane_lowre (pid: 482, stack limit = 0x1193ac2b)
+# spin_dump from do_raw_spin_lock+0xa4/0xe8
+# do_raw_spin_lock from wait_for_completion_timeout+0x2c/0x120
+# wait_for_completion_timeout from drm_crtc_commit_wait+0x18/0x7c
+# drm_crtc_commit_wait from drm_atomic_helper_wait_for_dependencies+0x44/0x168
+# drm_atomic_helper_wait_for_dependencies from commit_tail+0x34/0x180
+# commit_tail from drm_atomic_helper_commit+0x164/0x18c
+# drm_atomic_helper_commit from drm_atomic_commit+0xac/0xe4
+# drm_atomic_commit from drm_client_modeset_commit_atomic+0x23c/0x284
+# drm_client_modeset_commit_atomic from drm_client_modeset_commit_locked+0x60/0x1c8
+# drm_client_modeset_commit_locked from drm_client_modeset_commit+0x24/0x40
+# drm_client_modeset_commit from drm_fbdev_client_restore+0x58/0x94
+# drm_fbdev_client_restore from drm_client_dev_restore+0x70/0xbc
+# drm_client_dev_restore from drm_release+0xf4/0x114
+# drm_release from __fput+0x74/0x240
+# __fput from task_work_run+0x84/0xb4
+# task_work_run from do_exit+0x34c/0xa20
+# do_exit from do_group_exit+0x34/0x98
+# do_group_exit from __wake_up_parent+0x0/0x18
+# Code: e595c008 12843d19 03e00000 03093168 (15940508)
+# ---[ end trace 0000000000000000 ]---
+# note: kms_plane_lowre[482] exited with preempt_count 1
+# Fixing recursive fault but reboot is needed!
+kms_plane_lowres@pipe-F-tiling-y
+
+# These take too long; we have only two machines, and they are very flaky
+kms_cursor_crc.*
+
+# Machine is hanging in this test, so skip it
+kms_pipe_crc_basic@disable-crc-after-crtc
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
new file mode 100644
index 000000000000..6db08ba6b008
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -0,0 +1,37 @@
+kms_color@legacy-gamma,Fail
+kms_color@pipe-A-legacy-gamma,Fail
+kms_color@pipe-B-legacy-gamma,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@dpms-vs-vblank-race,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_flip@wf_vblank-ts-check,Fail
+kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_invalid_mode@int-max-clock,Fail
+kms_plane@pixel-format,Fail
+kms_plane@pixel-format-source-clamping,Fail
+kms_plane@plane-panning-bottom-right,Fail
+kms_plane@plane-panning-top-left,Fail
+kms_plane@plane-position-covered,Fail
+kms_plane_cursor@pipe-B-overlay-size-128,Fail
+kms_plane_cursor@pipe-B-overlay-size-256,Fail
+kms_plane_cursor@pipe-B-overlay-size-64,Fail
+kms_plane_cursor@pipe-B-primary-size-128,Fail
+kms_plane_cursor@pipe-B-primary-size-256,Fail
+kms_plane_cursor@pipe-B-primary-size-64,Fail
+kms_plane_cursor@pipe-B-viewport-size-128,Fail
+kms_plane_cursor@pipe-B-viewport-size-256,Fail
+kms_plane_cursor@pipe-B-viewport-size-64,Fail
+kms_plane_multiple@atomic-pipe-B-tiling-none,Fail
+kms_plane_multiple@tiling-none,Fail
+kms_prime@basic-crc,Fail
+kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
new file mode 100644
index 000000000000..4c0539b4beaf
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -0,0 +1,23 @@
+
+kms_cursor_crc@.*
+kms_flip@dpms-vs-vblank-race-interruptible
+kms_flip@flip-vs-expired-vblank
+kms_flip@modeset-vs-vblank-race-interruptible
+kms_pipe_crc_basic@.*
+kms_pipe_crc_basic@compare-crc-sanitycheck-pipe-A
+kms_pipe_crc_basic@compare-crc-sanitycheck-pipe-B
+kms_plane@plane-position-hole
+kms_plane_multiple@atomic-pipe-A-tiling-none
+kms_plane_multiple@atomic-pipe-B-tiling-none
+kms_sequence@get-forked
+kms_sequence@get-forked-busy
+kms_setmode@basic
+kms_universal_plane@universal-plane-pipe-B-functional,UnexpectedPass
+kms_vblank@pipe-A-accuracy-idle
+kms_vblank@pipe-A-query-busy
+kms_vblank@pipe-A-query-forked-busy
+kms_vblank@pipe-A-wait-idle
+kms_vblank@pipe-B-accuracy-idle
+kms_vblank@pipe-B-query-busy
+kms_vblank@pipe-B-query-forked-busy
+kms_vblank@pipe-B-wait-idle
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
new file mode 100644
index 000000000000..10c3d81a919a
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -0,0 +1,5 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Too unstable, machine ends up hanging after lots of Oopses
+kms_cursor_legacy.*
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
new file mode 100644
index 000000000000..9586b2339f6f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -0,0 +1,38 @@
+kms_addfb_basic@addfb25-bad-modifier,Fail
+kms_addfb_basic@bad-pitch-65536,Fail
+kms_addfb_basic@bo-too-small,Fail
+kms_addfb_basic@size-max,Fail
+kms_addfb_basic@too-high,Fail
+kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
+kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_invalid_mode@int-max-clock,Fail
+kms_plane_scaling@downscale-with-modifier-factor-0-25,Fail
+kms_plane_scaling@downscale-with-rotation-factor-0-25,Fail
+kms_plane_scaling@planes-upscale-20x20,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5,Fail
+kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-75,Fail
+kms_plane_scaling@upscale-with-modifier-20x20,Fail
+kms_plane_scaling@upscale-with-modifier-factor-0-25,Fail
+kms_plane_scaling@upscale-with-pixel-format-20x20,Fail
+kms_plane_scaling@upscale-with-pixel-format-factor-0-25,Fail
+kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_vblank@crtc-id,Fail
+kms_vblank@invalid,Fail
+kms_vblank@pipe-A-accuracy-idle,Fail
+kms_vblank@pipe-A-query-busy,Fail
+kms_vblank@pipe-A-query-forked,Fail
+kms_vblank@pipe-A-query-forked-busy,Fail
+kms_vblank@pipe-A-query-idle,Fail
+kms_vblank@pipe-A-ts-continuation-idle,Fail
+kms_vblank@pipe-A-ts-continuation-modeset,Fail
+kms_vblank@pipe-A-ts-continuation-suspend,Fail
+kms_vblank@pipe-A-wait-busy,Fail
+kms_vblank@pipe-A-wait-forked,Fail
+kms_vblank@pipe-A-wait-forked-busy,Fail
+kms_vblank@pipe-A-wait-idle,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-flakes.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-flakes.txt
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-flakes.txt
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
new file mode 100644
index 000000000000..78be18174012
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -0,0 +1,6 @@
+# Hits a "refcount_t: underflow; use-after-free" in virtio_gpu_fence_event_process
+# When run in a particular order with other tests
+kms_cursor_legacy.*
+
+# Job just hangs without any output
+kms_flip@flip-vs-suspend.*
\ No newline at end of file
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index bf8371dc2a61..c44d5bcf1284 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -2203,6 +2203,7 @@ static int drm_mode_create_colorspace_property(struct drm_connector *connector,
/**
* drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
* @connector: connector to create the Colorspace property on.
+ * @supported_colorspaces: bitmap of supported color spaces
*
* Called by a driver the first time it's needed, must be attached to desired
* HDMI connectors.
@@ -2227,6 +2228,7 @@ EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
/**
* drm_mode_create_dp_colorspace_property - create dp colorspace property
* @connector: connector to create the Colorspace property on.
+ * @supported_colorspaces: bitmap of supported color spaces
*
* Called by a driver the first time it's needed, must be attached to desired
* DP connectors.
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index ff69cf0fb42a..5d2809de4517 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -56,7 +56,7 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
struct drm_gem_object *obj;
unsigned long index;
- drm_exec_for_each_locked_object(exec, index, obj) {
+ drm_exec_for_each_locked_object_reverse(exec, index, obj) {
dma_resv_unlock(obj->resv);
drm_gem_object_put(obj);
}
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 0cb646cb04ee..d5c15292ae93 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -38,6 +38,14 @@ static const struct drm_dmi_panel_orientation_data gpd_micropc = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data gpd_onemix2s = {
+ .width = 1200,
+ .height = 1920,
+ .bios_dates = (const char * const []){ "05/21/2018", "10/26/2018",
+ "03/04/2019", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data gpd_pocket = {
.width = 1200,
.height = 1920,
@@ -401,6 +409,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* One Mix 2S (generic strings, also match on bios date) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_onemix2s,
},
{}
};
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 858c959f7bab..f735b035436c 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -3540,6 +3540,27 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
return map_aux_ch(devdata->i915, devdata->child.aux_channel);
}
+bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata)
+{
+ struct drm_i915_private *i915;
+ u8 aux_channel;
+ int count = 0;
+
+ if (!devdata || !devdata->child.aux_channel)
+ return false;
+
+ i915 = devdata->i915;
+ aux_channel = devdata->child.aux_channel;
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ if (intel_bios_encoder_supports_dp(devdata) &&
+ aux_channel == devdata->child.aux_channel)
+ count++;
+ }
+
+ return count > 1;
+}
+
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 9680e3e92bb5..49e24b7cf675 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -273,6 +273,7 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 12bd2f322e62..e0e4cb529284 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5512,8 +5512,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
/*
* VBT and straps are liars. Also check HPD as that seems
* to be the most reliable piece of information available.
+ *
+ * ... except on devices that forgot to hook HPD up for eDP
+ * (eg. Acer Chromebook C710), so we'll check it only if multiple
+ * ports are attempting to use the same AUX CH, according to VBT.
*/
- if (!intel_digital_port_connected(encoder)) {
+ if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
+ !intel_digital_port_connected(encoder)) {
/*
* If this fails, presume the DPCD answer came
* from some other port using the same AUX CH.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 6b6d22c19411..0ba955611dfb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -198,7 +198,7 @@ static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
for_each_gt(gt, i915, id) {
if (!obj->mm.tlb[id])
- return;
+ continue;
intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
obj->mm.tlb[id] = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 8f1633c3fb93..73a4a4eb29e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -100,6 +100,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
st->nents = 0;
for (i = 0; i < page_count; i++) {
struct folio *folio;
+ unsigned long nr_pages;
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
0,
@@ -150,6 +151,8 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
}
} while (1);
+ nr_pages = min_t(unsigned long,
+ folio_nr_pages(folio), page_count - i);
if (!i ||
sg->length >= max_segment ||
folio_pfn(folio) != next_pfn) {
@@ -157,13 +160,13 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
sg = sg_next(sg);
st->nents++;
- sg_set_folio(sg, folio, folio_size(folio), 0);
+ sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
} else {
/* XXX: could overflow? */
- sg->length += folio_size(folio);
+ sg->length += nr_pages * PAGE_SIZE;
}
- next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
- i += folio_nr_pages(folio) - 1;
+ next_pfn = folio_pfn(folio) + nr_pages;
+ i += nr_pages - 1;
/* Check that the i965g/gm workaround works. */
GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
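
The clamp above matters when a large folio straddles the end of the object. A small standalone sketch of the arithmetic, with illustrative numbers: if only 3 of the object's pages remain to be mapped but shmem hands back a 512-page (2 MiB) folio, nr_pages is clamped to 3, so the scatterlist entry covers 12 KiB rather than the folio's full 2 MiB.

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long page_count = 1027;	/* object size in pages (illustrative) */
	unsigned long i = 1024;			/* pages already mapped */
	unsigned long folio_nr_pages = 512;	/* a 2 MiB folio */

	/* Clamp to what the object still needs, as the hunk above does. */
	unsigned long nr_pages = min_ul(folio_nr_pages, page_count - i);

	printf("sg length = %lu bytes (not %lu)\n",
	       nr_pages * PAGE_SIZE, folio_nr_pages * PAGE_SIZE);
	return 0;
}
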
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index a4ff55aa5e55..7ad36198aab2 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -271,8 +271,17 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
+ /*
+ * L3 fabric flush is needed for AUX CCS invalidation
+ * which happens as part of pipe-control so we can
+ * ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3
+ * deals with Protected Memory which is not needed for
+	 * AUX CCS invalidation and leads to unwanted side effects.
+ */
+ if (mode & EMIT_FLUSH)
+ bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
+
bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/* Wa_1409600907:tgl,adl-p */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ee15486fed0d..e85d70a62123 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -558,7 +558,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
DRIVER_CAPS(i915)->has_logical_contexts = true;
ewma__engine_latency_init(&engine->latency);
- seqcount_init(&engine->stats.execlists.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index e99a6fa03d45..a7e677598004 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -58,6 +58,7 @@ struct i915_perf_group;
typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
struct intel_hw_status_page {
struct list_head timelines;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 8a641bcf777c..3292524469d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3550,6 +3550,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
+ seqcount_init(&engine->stats.execlists.lock);
+
if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
rcs_submission_override(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index dd0ed941441a..da21f2786b5d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -511,20 +511,31 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
+/*
+ * Reserve the top of the GuC address space for firmware images. Addresses
+ * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
+ * which makes for a suitable range to hold GuC/HuC firmware images if the
+ * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
+ * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk
+ * of the same size anyway, which is far more than needed, to keep the logic
+ * in uc_fw_ggtt_offset() simple.
+ */
+#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
+
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
- u64 size;
+ u64 offset;
int ret;
if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
return 0;
- GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
- size = ggtt->vm.total - GUC_GGTT_TOP;
+ GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
+ offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
- ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
- GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
- PIN_NOEVICT);
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
+ GUC_TOP_RESERVE_SIZE, offset,
+ I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
if (ret)
drm_dbg(&ggtt->vm.i915->drm,
"Failed to reserve top of GGTT for GuC\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 957d0aeb0c02..c378cc7c953c 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1094,6 +1094,9 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj)) {
obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
/*
* Wa_22016122933: For Media version 13.0, all Media GT shared
* memory needs to be mapped as WC on CPU side and UC (PAT
@@ -1102,8 +1105,6 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (intel_gt_needs_wa_22016122933(engine->gt))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}
- if (IS_ERR(obj))
- return ERR_CAST(obj);
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index a0e3ef1c65d2..dc7b40e06e38 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1433,6 +1433,36 @@ static void guc_timestamp_ping(struct work_struct *wrk)
int srcu, ret;
/*
+ * Ideally the busyness worker should take a gt pm wakeref because the
+ * worker only needs to be active while gt is awake. However, the
+ * gt_park path cancels the worker synchronously and this complicates
+ * the flow if the worker is also running at the same time. The cancel
+ * waits for the worker and when the worker releases the wakeref, that
+ * would call gt_park and would lead to a deadlock.
+ *
+ * The resolution is to take the global pm wakeref if runtime pm is
+ * already active. If not, we don't need to update the busyness stats as
+ * the stats would already be updated when the gt was parked.
+ *
+ * Note:
+ * - We do not requeue the worker if we cannot take a reference to runtime
+ * pm since intel_guc_busyness_unpark would requeue the worker in the
+ * resume path.
+ *
+	 * - If the gt was parked longer than the time taken for the GT timestamp
+	 *   to roll over, we ignore those rollovers since we don't care about
+	 *   tracking the exact GT time. We only care about rollovers when the gt
+	 *   is active and running workloads.
+ *
+ * - There is a window of time between gt_park and runtime suspend,
+ * where the worker may run. This is acceptable since the worker will
+ * not find any new data to update busyness.
+ */
+ wakeref = intel_runtime_pm_get_if_active(&gt->i915->runtime_pm);
+ if (!wakeref)
+ return;
+
+ /*
* Synchronize with gt reset to make sure the worker does not
* corrupt the engine/guc stats. NB: can't actually block waiting
* for a reset to complete as the reset requires flushing out
@@ -1440,10 +1470,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
*/
ret = intel_gt_reset_trylock(gt, &srcu);
if (ret)
- return;
+ goto err_trylock;
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
- __update_guc_busyness_stats(guc);
+ __update_guc_busyness_stats(guc);
/* adjust context stats for overflow */
xa_for_each(&guc->context_lookup, index, ce)
@@ -1452,6 +1481,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
intel_gt_reset_unlock(gt, srcu);
guc_enable_busyness_worker(guc);
+
+err_trylock:
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}
static int guc_action_enable_usage_stats(struct intel_guc *guc)
@@ -5470,6 +5502,9 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+ BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
+ ve->base.mask = VIRTUAL_ENGINES;
+
intel_context_init(&ve->context, &ve->base);
for (n = 0; n < count; n++) {
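
The pattern described in the guc_timestamp_ping() comment above - take a reference only if the device is already runtime-active, otherwise skip the work - can be sketched in isolation. This is a simplified userspace analogue with illustrative names, not the i915 runtime-PM API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pm_usage;	/* >0 means the device is runtime-active */

/* Take a reference only if the device is already active; never wake it up. */
static bool pm_get_if_active(void)
{
	int old = atomic_load(&pm_usage);

	while (old > 0)
		if (atomic_compare_exchange_weak(&pm_usage, &old, old + 1))
			return true;
	return false;
}

static void pm_put(void)
{
	atomic_fetch_sub(&pm_usage, 1);
}

static void busyness_worker(void)
{
	if (!pm_get_if_active())
		return;		/* parked: stats were already updated on park */

	puts("update busyness stats");
	pm_put();
}

int main(void)
{
	busyness_worker();		/* device suspended: worker bails out */
	atomic_store(&pm_usage, 1);
	busyness_worker();		/* device active: stats updated */
	return 0;
}
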
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4ec85308379a..094fca9b0e73 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,22 +49,6 @@
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
-static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
-{
- struct kvm *kvm = vgpu->vfio_device.kvm;
- int idx;
- bool ret;
-
- if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
- return false;
-
- idx = srcu_read_lock(&kvm->srcu);
- ret = kvm_is_visible_gfn(kvm, gfn);
- srcu_read_unlock(&kvm->srcu, idx);
-
- return ret;
-}
-
/*
* validate a gm address and related range size,
* translate it to host gm address
@@ -1161,31 +1145,6 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}
-/*
- * Check if can do 2M page
- * @vgpu: target vgpu
- * @entry: target pfn's gtt entry
- *
- * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
- * negative if found err.
- */
-static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
- struct intel_gvt_gtt_entry *entry)
-{
- const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- kvm_pfn_t pfn;
-
- if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
- return 0;
-
- if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
- return -EINVAL;
- pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
- if (is_error_noslot_pfn(pfn))
- return -EINVAL;
- return PageTransHuge(pfn_to_page(pfn));
-}
-
static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
struct intel_gvt_gtt_entry *se)
@@ -1279,7 +1238,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
{
const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry se = *ge;
- unsigned long gfn, page_size = PAGE_SIZE;
+ unsigned long gfn;
dma_addr_t dma_addr;
int ret;
@@ -1291,6 +1250,9 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
switch (ge->type) {
case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
gvt_vdbg_mm("shadow 4K gtt entry\n");
+ ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
+ if (ret)
+ return -ENXIO;
break;
case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
gvt_vdbg_mm("shadow 64K gtt entry\n");
@@ -1302,25 +1264,20 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
return split_64KB_gtt_entry(vgpu, spt, index, &se);
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
gvt_vdbg_mm("shadow 2M gtt entry\n");
- ret = is_2MB_gtt_possible(vgpu, ge);
- if (ret == 0)
+ if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) ||
+ intel_gvt_dma_map_guest_page(vgpu, gfn,
+ I915_GTT_PAGE_SIZE_2M, &dma_addr))
return split_2MB_gtt_entry(vgpu, spt, index, &se);
- else if (ret < 0)
- return ret;
- page_size = I915_GTT_PAGE_SIZE_2M;
break;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
gvt_vgpu_err("GVT doesn't support 1GB entry\n");
return -EINVAL;
default:
GEM_BUG_ON(1);
+ return -EINVAL;
}
- /* direct shadow */
- ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
- if (ret)
- return -ENXIO;
-
+ /* Successfully shadowed a 4K or 2M page (without splitting). */
pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
ppgtt_set_shadow_entry(spt, &se, index);
return 0;
@@ -1329,11 +1286,9 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_vgpu *vgpu = spt->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
struct intel_gvt_gtt_entry se, ge;
- unsigned long gfn, i;
+ unsigned long i;
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1350,13 +1305,6 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
ppgtt_generate_shadow_entry(&se, s, &ge);
ppgtt_set_shadow_entry(spt, &se, i);
} else {
- gfn = ops->get_pfn(&ge);
- if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
- ops->set_pfn(&se, gvt->gtt.scratch_mfn);
- ppgtt_set_shadow_entry(spt, &se, i);
- continue;
- }
-
ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
if (ret)
goto fail;
@@ -1845,6 +1793,9 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
if (mm->ppgtt_mm.shadowed)
return 0;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ return -EINVAL;
+
mm->ppgtt_mm.shadowed = true;
for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
@@ -2331,14 +2282,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
m.val64 = e.val64;
m.type = e.type;
- /* one PTE update may be issued in multiple writes and the
- * first write may not construct a valid gfn
- */
- if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
- ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- goto out;
- }
-
ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
&dma_addr);
if (ret) {
@@ -2355,7 +2298,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
ops->clear_present(&m);
}
-out:
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
@@ -2876,24 +2818,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
}
/**
- * intel_vgpu_reset_gtt - reset the all GTT related status
- * @vgpu: a vGPU
- *
- * This function is called from vfio core to reset reset all
- * GTT related status, including GGTT, PPGTT, scratch page.
- *
- */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
-{
- /* Shadow pages are only created when there is no page
- * table tracking data, so remove page tracking data after
- * removing the shadow pages.
- */
- intel_vgpu_destroy_all_ppgtt_mm(vgpu);
- intel_vgpu_reset_ggtt(vgpu, true);
-}
-
-/**
* intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
* @gvt: intel gvt device
*
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index a3b0f59ec8bd..4cb183e06e95 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -224,7 +224,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
int intel_gvt_init_gtt(struct intel_gvt *gvt);
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
void intel_gvt_clean_gtt(struct intel_gvt *gvt);
struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2d65800d8e93..53a0a42a50db 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -34,10 +34,11 @@
#define _GVT_H_
#include <uapi/linux/pci_regs.h>
-#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
+#include <asm/kvm_page_track.h>
+
#include "i915_drv.h"
#include "intel_gvt.h"
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 9cd9e9da60dd..42ce20e72db7 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -106,12 +106,10 @@ struct gvt_dma {
#define vfio_dev_to_vgpu(vfio_dev) \
container_of((vfio_dev), struct intel_vgpu, vfio_device)
-static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *val, int len,
- struct kvm_page_track_notifier_node *node);
-static void kvmgt_page_track_flush_slot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_page_track_notifier_node *node);
+static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node);
+static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
+ struct kvm_page_track_notifier_node *node);
static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
{
@@ -161,8 +159,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
if (npage == 0)
base_page = cur_page;
- else if (base_page + npage != cur_page) {
- gvt_vgpu_err("The pages are not continuous\n");
+ else if (page_to_pfn(base_page) + npage != page_to_pfn(cur_page)) {
ret = -EINVAL;
npage++;
goto err;
@@ -172,7 +169,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
*page = base_page;
return 0;
err:
- gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+ if (npage)
+ gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
return ret;
}
@@ -352,6 +350,8 @@ __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p, *res = NULL;
+ lockdep_assert_held(&info->vgpu_lock);
+
hash_for_each_possible(info->ptable, p, hnode, gfn) {
if (gfn == p->gfn) {
res = p;
@@ -654,21 +654,19 @@ out:
static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
-
- if (!vgpu->vfio_device.kvm ||
- vgpu->vfio_device.kvm->mm != current->mm) {
- gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- return -ESRCH;
- }
+ int ret;
if (__kvmgt_vgpu_exist(vgpu))
return -EEXIST;
vgpu->track_node.track_write = kvmgt_page_track_write;
- vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
- kvm_get_kvm(vgpu->vfio_device.kvm);
- kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
- &vgpu->track_node);
+ vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region;
+ ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
+ &vgpu->track_node);
+ if (ret) {
+ gvt_vgpu_err("KVM is required to use Intel vGPU\n");
+ return ret;
+ }
set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
@@ -703,7 +701,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
- kvm_put_kvm(vgpu->vfio_device.kvm);
kvmgt_protect_table_destroy(vgpu);
gvt_cache_destroy(vgpu);
@@ -1547,95 +1544,70 @@ static struct mdev_driver intel_vgpu_mdev_driver = {
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->vfio_device.kvm;
- struct kvm_memory_slot *slot;
- int idx;
+ int r;
if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
- idx = srcu_read_lock(&kvm->srcu);
- slot = gfn_to_memslot(kvm, gfn);
- if (!slot) {
- srcu_read_unlock(&kvm->srcu, idx);
- return -EINVAL;
- }
-
- write_lock(&kvm->mmu_lock);
-
if (kvmgt_gfn_is_write_protected(info, gfn))
- goto out;
+ return 0;
- kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
- kvmgt_protect_table_add(info, gfn);
+ r = kvm_write_track_add_gfn(info->vfio_device.kvm, gfn);
+ if (r)
+ return r;
-out:
- write_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
+ kvmgt_protect_table_add(info, gfn);
return 0;
}
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->vfio_device.kvm;
- struct kvm_memory_slot *slot;
- int idx;
+ int r;
if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
- idx = srcu_read_lock(&kvm->srcu);
- slot = gfn_to_memslot(kvm, gfn);
- if (!slot) {
- srcu_read_unlock(&kvm->srcu, idx);
- return -EINVAL;
- }
-
- write_lock(&kvm->mmu_lock);
-
if (!kvmgt_gfn_is_write_protected(info, gfn))
- goto out;
+ return 0;
- kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
- kvmgt_protect_table_del(info, gfn);
+ r = kvm_write_track_remove_gfn(info->vfio_device.kvm, gfn);
+ if (r)
+ return r;
-out:
- write_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
+ kvmgt_protect_table_del(info, gfn);
return 0;
}
-static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *val, int len,
- struct kvm_page_track_notifier_node *node)
+static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node)
{
struct intel_vgpu *info =
container_of(node, struct intel_vgpu, track_node);
- if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
+ mutex_lock(&info->vgpu_lock);
+
+ if (kvmgt_gfn_is_write_protected(info, gpa >> PAGE_SHIFT))
intel_vgpu_page_track_handler(info, gpa,
(void *)val, len);
+
+ mutex_unlock(&info->vgpu_lock);
}
-static void kvmgt_page_track_flush_slot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_page_track_notifier_node *node)
+static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
+ struct kvm_page_track_notifier_node *node)
{
- int i;
- gfn_t gfn;
+ unsigned long i;
struct intel_vgpu *info =
container_of(node, struct intel_vgpu, track_node);
- write_lock(&kvm->mmu_lock);
- for (i = 0; i < slot->npages; i++) {
- gfn = slot->base_gfn + i;
- if (kvmgt_gfn_is_write_protected(info, gfn)) {
- kvm_slot_page_track_remove_page(kvm, slot, gfn,
- KVM_PAGE_TRACK_WRITE);
- kvmgt_protect_table_del(info, gfn);
- }
+ mutex_lock(&info->vgpu_lock);
+
+ for (i = 0; i < nr_pages; i++) {
+ if (kvmgt_gfn_is_write_protected(info, gfn + i))
+ kvmgt_protect_table_del(info, gfn + i);
}
- write_unlock(&kvm->mmu_lock);
+
+ mutex_unlock(&info->vgpu_lock);
}
void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index df34e73cba41..60a65435556d 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -162,13 +162,9 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
struct intel_vgpu_page_track *page_track;
int ret = 0;
- mutex_lock(&vgpu->vgpu_lock);
-
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
- if (!page_track) {
- ret = -ENXIO;
- goto out;
- }
+ if (!page_track)
+ return -ENXIO;
if (unlikely(vgpu->failsafe)) {
/* Remove write protection to prevent furture traps. */
@@ -179,7 +175,5 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
gvt_err("guest page write error, gpa %llx\n", gpa);
}
-out:
- mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1f65bb33dd21..a8551ce322de 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1199,6 +1199,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
+ /*
+ * Register engines early to ensure the engine list is in its final
+ * rb-tree form, lowering the amount of code that has to deal with
+ * the intermediate llist state.
+ */
+ intel_engines_driver_register(dev_priv);
+
return 0;
/*
@@ -1246,8 +1253,6 @@ err_unlock:
void i915_gem_driver_register(struct drm_i915_private *i915)
{
i915_gem_driver_register__shrinker(i915);
-
- intel_engines_driver_register(i915);
}
void i915_gem_driver_unregister(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7c7da284990d..f59081066a19 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -134,9 +134,7 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->semaphore);
/*
- * Keep one request on each engine for reserved use under mempressure
- * do not use with virtual engines as this really is only needed for
- * kernel contexts.
+ * Keep one request on each engine for reserved use under mempressure.
*
* We do not hold a reference to the engine here and so have to be
* very careful in what rq->engine we poke. The virtual engine is
@@ -166,8 +164,7 @@ static void i915_fence_release(struct dma_fence *fence)
* know that if the rq->execution_mask is a single bit, rq->engine
* can be a physical engine with the exact corresponding mask.
*/
- if (!intel_engine_is_virtual(rq->engine) &&
- is_power_of_2(rq->execution_mask) &&
+ if (is_power_of_2(rq->execution_mask) &&
!cmpxchg(&rq->engine->request_pool, NULL, rq))
return;
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 9913971fa5d2..25ea76558690 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -334,6 +334,8 @@ static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge,
return;
cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
+
+ kfree(edid);
} else
cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index a34924523133..a34917b048f9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1122,18 +1122,11 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
PUSH_KICK(push);
- ret = nouveau_fence_new(pfence);
+ ret = nouveau_fence_new(pfence, chan);
if (ret)
goto fail;
- ret = nouveau_fence_emit(*pfence, chan);
- if (ret)
- goto fail_fence_unref;
-
return 0;
-
-fail_fence_unref:
- nouveau_fence_unref(pfence);
fail:
spin_lock_irqsave(&dev->event_lock, flags);
list_del(&s->head);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 30afbec9e3b1..2edd7bb13fae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -31,6 +31,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_exec.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
@@ -183,6 +184,20 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
cli->abi16 = NULL;
}
+static inline int
+getparam_dma_ib_max(struct nvif_device *device)
+{
+ const struct nvif_mclass dmas[] = {
+ { NV03_CHANNEL_DMA, 0 },
+ { NV10_CHANNEL_DMA, 0 },
+ { NV17_CHANNEL_DMA, 0 },
+ { NV40_CHANNEL_DMA, 0 },
+ {}
+ };
+
+ return nvif_mclass(&device->object, dmas) < 0 ? NV50_DMA_IB_MAX : 0;
+}
+
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
@@ -247,6 +262,12 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
case NOUVEAU_GETPARAM_GRAPH_UNITS:
getparam->value = nvkm_gr_units(gr);
break;
+ case NOUVEAU_GETPARAM_EXEC_PUSH_MAX: {
+ int ib_max = getparam_dma_ib_max(device);
+
+ getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
+ break;
+ }
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 19cab37ac69c..0f3bd187ede6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -875,16 +875,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret)
goto out_unlock;
- ret = nouveau_fence_new(&fence);
+ ret = nouveau_fence_new(&fence, chan);
if (ret)
goto out_unlock;
- ret = nouveau_fence_emit(fence, chan);
- if (ret) {
- nouveau_fence_unref(&fence);
- goto out_unlock;
- }
-
/* TODO: figure out a better solution here
*
* wait on the fence here explicitly as going through
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 1fd5ccf41128..7c97b2886807 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -70,11 +70,9 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(&fence);
+ ret = nouveau_fence_new(&fence, chan);
if (!ret) {
- ret = nouveau_fence_emit(fence, chan);
- if (!ret)
- ret = nouveau_fence_wait(fence, false, false);
+ ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
@@ -259,10 +257,7 @@ static int
nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
- static const struct {
- s32 oclass;
- int version;
- } hosts[] = {
+ const struct nvif_mclass hosts[] = {
{ AMPERE_CHANNEL_GPFIFO_B, 0 },
{ AMPERE_CHANNEL_GPFIFO_A, 0 },
{ TURING_CHANNEL_GPFIFO_A, 0 },
@@ -445,9 +440,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise dma tracking parameters */
- switch (chan->user.oclass & 0x00ff) {
- case 0x006b:
- case 0x006e:
+ switch (chan->user.oclass) {
+ case NV03_CHANNEL_DMA:
+ case NV10_CHANNEL_DMA:
+ case NV17_CHANNEL_DMA:
+ case NV40_CHANNEL_DMA:
chan->user_put = 0x40;
chan->user_get = 0x44;
chan->dma.max = (0x10000 / 4) - 2;
@@ -457,7 +454,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
chan->user_get = 0x44;
chan->user_get_hi = 0x60;
chan->dma.ib_base = 0x10000 / 4;
- chan->dma.ib_max = (0x02000 / 8) - 1;
+ chan->dma.ib_max = NV50_DMA_IB_MAX;
chan->dma.ib_put = 0;
chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
chan->dma.max = chan->dma.ib_base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 1744d95b233e..c52cda82353e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -49,6 +49,9 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
/* Maximum push buffer size. */
#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff
+/* Maximum IBs per ring. */
+#define NV50_DMA_IB_MAX ((0x02000 / 8) - 1)
+
/* Object handles - for stuff that's doesn't use handle == oclass. */
enum {
NvDmaFB = 0x80000002,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61e84562094a..12feecf71e75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -209,8 +209,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
goto done;
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, dmem->migrate.chan);
+ nouveau_fence_new(&fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -403,8 +402,7 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
}
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
+ nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -677,8 +675,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
addr += PAGE_SIZE;
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, drm->dmem->migrate.chan);
+ nouveau_fence_new(&fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1fe17ff95f5e..e73a233c6572 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -189,21 +189,12 @@ u_free(void *addr)
static inline void *
u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
{
- void *mem;
- void __user *userptr = (void __force __user *)(uintptr_t)user;
+ void __user *userptr = u64_to_user_ptr(user);
+ size_t bytes;
- size *= nmemb;
-
- mem = kvmalloc(size, GFP_KERNEL);
- if (!mem)
- return ERR_PTR(-ENOMEM);
-
- if (copy_from_user(mem, userptr, size)) {
- u_free(mem);
- return ERR_PTR(-EFAULT);
- }
-
- return mem;
+ if (unlikely(check_mul_overflow(nmemb, size, &bytes)))
+ return ERR_PTR(-EOVERFLOW);
+ return vmemdup_user(userptr, bytes);
}
#include <nvif/object.h>
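
The new u_memcpya() refuses the copy when nmemb * size overflows instead of silently wrapping. A minimal userspace analogue of that pattern, using the compiler builtin that the kernel's check_mul_overflow() wraps (the names below are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate nmemb * size bytes from src, refusing on multiplication overflow. */
static void *memdup_array(const void *src, size_t nmemb, size_t size)
{
	size_t bytes;
	void *mem;

	if (__builtin_mul_overflow(nmemb, size, &bytes)) {
		errno = EOVERFLOW;
		return NULL;
	}

	mem = malloc(bytes);
	if (mem)
		memcpy(mem, src, bytes);
	return mem;
}

int main(void)
{
	int vals[4] = { 1, 2, 3, 4 };
	int *copy = memdup_array(vals, 4, sizeof(*vals));

	printf("copy[3] = %d\n", copy ? copy[3] : -1);
	printf("overflow rejected: %d\n",
	       memdup_array(vals, (size_t)-1, 8) == NULL);
	free(copy);
	return 0;
}
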
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index a90c4cd8cbb2..c1837ba95fb5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -96,7 +96,8 @@ nouveau_exec_job_submit(struct nouveau_job *job)
unsigned long index;
int ret;
- ret = nouveau_fence_new(&exec_job->fence);
+ /* Create a new fence, but do not emit yet. */
+ ret = nouveau_fence_create(&exec_job->fence, exec_job->chan);
if (ret)
return ret;
@@ -170,13 +171,17 @@ nouveau_exec_job_run(struct nouveau_job *job)
nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
}
- ret = nouveau_fence_emit(fence, chan);
+ ret = nouveau_fence_emit(fence);
if (ret) {
+ nouveau_fence_unref(&exec_job->fence);
NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
return ERR_PTR(ret);
}
+	/* The fence was emitted successfully; set the job's fence pointer to
+	 * NULL so it is not freed when the job is cleaned up.
+ */
exec_job->fence = NULL;
return &fence->base;
@@ -189,7 +194,7 @@ nouveau_exec_job_free(struct nouveau_job *job)
nouveau_job_free(job);
- nouveau_fence_unref(&exec_job->fence);
+ kfree(exec_job->fence);
kfree(exec_job->push.s);
kfree(exec_job);
}
@@ -208,7 +213,7 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
nouveau_sched_entity_fini(job->entity);
- return DRM_GPU_SCHED_STAT_ENODEV;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
static struct nouveau_job_ops nouveau_exec_job_ops = {
@@ -374,7 +379,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
struct nouveau_channel *chan = NULL;
struct nouveau_exec_job_args args = {};
struct drm_nouveau_exec *req = data;
- int ret = 0;
+ int push_max, ret = 0;
if (unlikely(!abi16))
return -ENOMEM;
@@ -399,9 +404,10 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (!chan->dma.ib_max)
return nouveau_abi16_put(abi16, -ENOSYS);
- if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) {
+ push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max);
+ if (unlikely(req->push_count > push_max)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
- req->push_count, NOUVEAU_GEM_MAX_PUSH);
+ req->push_count, push_max);
return nouveau_abi16_put(abi16, -EINVAL);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h
index 778cacd90f65..5488d337bcc0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.h
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.h
@@ -51,4 +51,14 @@ int nouveau_exec_job_init(struct nouveau_exec_job **job,
int nouveau_exec_ioctl_exec(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+static inline unsigned int
+nouveau_exec_push_max_from_ib_max(int ib_max)
+{
+ /* Limit the number of IBs per job to half the size of the ring in order
+ * to avoid the ring running dry between submissions and preserve one
+ * more slot for the job's HW fence.
+ */
+ return ib_max > 1 ? ib_max / 2 - 1 : 0;
+}
+
#endif
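
Plugging in the values defined earlier in this series: NV50_DMA_IB_MAX = (0x2000 / 8) - 1 = 1023, so nouveau_exec_push_max_from_ib_max() yields 1023 / 2 - 1 = 510 pushes per EXEC job, leaving the other half of the ring plus one extra slot for the job's HW fence. A standalone check of that arithmetic:

#include <stdio.h>

#define NV50_DMA_IB_MAX ((0x02000 / 8) - 1)

static unsigned int push_max_from_ib_max(int ib_max)
{
	return ib_max > 1 ? ib_max / 2 - 1 : 0;
}

int main(void)
{
	/* 1023 IB slots per ring -> at most 510 pushes per EXEC job. */
	printf("ib_max=%d push_max=%u\n",
	       NV50_DMA_IB_MAX, push_max_from_ib_max(NV50_DMA_IB_MAX));
	return 0;
}
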
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 77c739a55b19..ca762ea55413 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -205,16 +205,13 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
}
int
-nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
+nouveau_fence_emit(struct nouveau_fence *fence)
{
+ struct nouveau_channel *chan = unrcu_pointer(fence->channel);
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
- if (unlikely(!chan->fence))
- return -ENODEV;
-
- fence->channel = chan;
fence->timeout = jiffies + (15 * HZ);
if (priv->uevent)
@@ -406,18 +403,41 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
}
int
-nouveau_fence_new(struct nouveau_fence **pfence)
+nouveau_fence_create(struct nouveau_fence **pfence,
+ struct nouveau_channel *chan)
{
struct nouveau_fence *fence;
+ if (unlikely(!chan->fence))
+ return -ENODEV;
+
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
+ fence->channel = chan;
+
*pfence = fence;
return 0;
}
+int
+nouveau_fence_new(struct nouveau_fence **pfence,
+ struct nouveau_channel *chan)
+{
+ int ret = 0;
+
+ ret = nouveau_fence_create(pfence, chan);
+ if (ret)
+ return ret;
+
+ ret = nouveau_fence_emit(*pfence);
+ if (ret)
+ nouveau_fence_unref(pfence);
+
+ return ret;
+}
+
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
return "nouveau";
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2c72d96ef17d..64d33ae7f356 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -17,10 +17,11 @@ struct nouveau_fence {
unsigned long timeout;
};
-int nouveau_fence_new(struct nouveau_fence **);
+int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *);
+int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *);
void nouveau_fence_unref(struct nouveau_fence **);
-int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
+int nouveau_fence_emit(struct nouveau_fence *);
bool nouveau_fence_done(struct nouveau_fence *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c0b10d8d3d03..a0d303e5ce3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -914,11 +914,8 @@ revalidate:
}
}
- ret = nouveau_fence_new(&fence);
- if (!ret)
- ret = nouveau_fence_emit(fence, chan);
+ ret = nouveau_fence_new(&fence, chan);
if (ret) {
- nouveau_fence_unref(&fence);
NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 88217185e0f3..3b7ea5221226 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -375,14 +375,20 @@ nouveau_sched_run_job(struct drm_sched_job *sched_job)
static enum drm_gpu_sched_stat
nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
+ struct drm_gpu_scheduler *sched = sched_job->sched;
struct nouveau_job *job = to_nouveau_job(sched_job);
+ enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL;
- NV_PRINTK(warn, job->cli, "Job timed out.\n");
+ drm_sched_stop(sched, sched_job);
if (job->ops->timeout)
- return job->ops->timeout(job);
+ stat = job->ops->timeout(job);
+ else
+ NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
+
+ drm_sched_start(sched, true);
- return DRM_GPU_SCHED_STAT_ENODEV;
+ return stat;
}
static void
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c87a57c9c592..22dd8b445685 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -123,7 +123,7 @@ int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager,
unsigned int size, unsigned int align)
{
struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
- GFP_KERNEL, true, align);
+ GFP_KERNEL, false, align);
if (IS_ERR(sa)) {
*sa_bo = NULL;
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index c1dfbfcaa000..bccb33b900f3 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -118,7 +118,7 @@ void drm_kunit_helper_free_device(struct kunit *test, struct device *dev)
kunit_release_action(test,
kunit_action_platform_driver_unregister,
- pdev);
+ &fake_platform_driver);
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device);
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 186b28dc7038..05d5e7af6d25 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -939,7 +939,7 @@ static void drm_test_mm_insert_range(struct kunit *test)
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
- max / 2, max / 2));
+ max / 2, max));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
max / 4 + 1, 3 * max / 4 - 1));
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index c5bb683e440c..0187539ff5ea 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -70,10 +70,10 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
#define READ_STATUS_SIZE 13
#define MISC_VALUE_SIZE 4
-#define CMD_TIMEOUT msecs_to_jiffies(200)
-#define DATA_TIMEOUT msecs_to_jiffies(1000)
-#define IDLE_TIMEOUT msecs_to_jiffies(2000)
-#define FIRST_FRAME_TIMEOUT msecs_to_jiffies(2000)
+#define CMD_TIMEOUT 200
+#define DATA_TIMEOUT 1000
+#define IDLE_TIMEOUT 2000
+#define FIRST_FRAME_TIMEOUT 2000
#define MISC_REQ_GET_SET_ECO_A 0xff
#define MISC_REQ_GET_SET_ECO_B 0x35
@@ -389,7 +389,7 @@ static void gm12u320_fb_update_work(struct work_struct *work)
* switches back to showing its logo.
*/
queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
- IDLE_TIMEOUT);
+ msecs_to_jiffies(IDLE_TIMEOUT));
return;
err:
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index 3c00135ead45..5c514946bbad 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -361,7 +361,6 @@ static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
submit->buf = NULL;
submit->buflist = NULL;
submit->sync_file = NULL;
- submit->out_fence = NULL;
submit->out_fence_fd = -1;
}
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index d5d4f642d367..3c99fb8b54e2 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -408,15 +408,10 @@ void vkms_set_composer(struct vkms_output *out, bool enabled)
if (enabled)
drm_crtc_vblank_get(&out->crtc);
- mutex_lock(&out->enabled_lock);
+ spin_lock_irq(&out->lock);
old_enabled = out->composer_enabled;
out->composer_enabled = enabled;
-
- /* the composition wasn't enabled, so unlock the lock to make sure the lock
- * will be balanced even if we have a failed commit
- */
- if (!out->composer_enabled)
- mutex_unlock(&out->enabled_lock);
+ spin_unlock_irq(&out->lock);
if (old_enabled)
drm_crtc_vblank_put(&out->crtc);
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 3c5ebf106b66..61e500b8c9da 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -16,7 +16,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
struct drm_crtc *crtc = &output->crtc;
struct vkms_crtc_state *state;
u64 ret_overrun;
- bool ret, fence_cookie, composer_enabled;
+ bool ret, fence_cookie;
fence_cookie = dma_fence_begin_signalling();
@@ -25,15 +25,15 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
if (ret_overrun != 1)
pr_warn("%s: vblank timer overrun\n", __func__);
+ spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
state = output->composer_state;
- composer_enabled = output->composer_enabled;
- mutex_unlock(&output->enabled_lock);
+ spin_unlock(&output->lock);
- if (state && composer_enabled) {
+ if (state && output->composer_enabled) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
/* update frame_start only if a queued vkms_composer_worker()
@@ -295,7 +295,6 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
spin_lock_init(&vkms_out->lock);
spin_lock_init(&vkms_out->composer_lock);
- mutex_init(&vkms_out->enabled_lock);
vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
if (!vkms_out->composer_workq)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index c7ae6c2ba1df..8f5710debb1e 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -108,10 +108,8 @@ struct vkms_output {
struct workqueue_struct *composer_workq;
/* protects concurrent access to composer */
spinlock_t lock;
- /* guarantees that if the composer is enabled, a job will be queued */
- struct mutex enabled_lock;
- /* protected by @enabled_lock */
+ /* protected by @lock */
bool composer_enabled;
struct vkms_crtc_state *composer_state;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 8db4ae05febc..e098cc7b3944 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1802,7 +1802,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
}
-#ifdef CONFIG_PM
static int mt_suspend(struct hid_device *hdev, pm_message_t state)
{
struct mt_device *td = hid_get_drvdata(hdev);
@@ -1836,7 +1835,6 @@ static int mt_resume(struct hid_device *hdev)
return 0;
}
-#endif
static void mt_remove(struct hid_device *hdev)
{
@@ -2259,10 +2257,8 @@ static struct hid_driver mt_driver = {
.usage_table = mt_grabbed_usages,
.event = mt_event,
.report = mt_report,
-#ifdef CONFIG_PM
- .suspend = mt_suspend,
- .reset_resume = mt_reset_resume,
- .resume = mt_resume,
-#endif
+ .suspend = pm_ptr(mt_suspend),
+ .reset_resume = pm_ptr(mt_reset_resume),
+ .resume = pm_ptr(mt_resume),
};
module_hid_driver(mt_driver);
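
The pm_ptr() conversions above rely on the macro resolving to the callback when CONFIG_PM is enabled and to NULL otherwise, which is what lets the #ifdef CONFIG_PM guards go away while the compiler still drops the unused paths. A simplified userspace analogue of that idea (the real macro lives in <linux/pm.h>; CONFIG_PM_ENABLED below is an illustrative stand-in):

#include <stdio.h>

/* Userspace stand-in for IS_ENABLED(CONFIG_PM); flip to 0 to "disable PM". */
#define CONFIG_PM_ENABLED 1

/* Simplified analogue of pm_ptr()/PTR_IF(): keep the symbol, drop the use. */
#define pm_ptr(ptr) (CONFIG_PM_ENABLED ? (ptr) : NULL)

struct driver_ops {
	int (*suspend)(void);
	int (*resume)(void);
};

static int my_suspend(void) { puts("suspend"); return 0; }
static int my_resume(void)  { puts("resume");  return 0; }

static const struct driver_ops ops = {
	.suspend = pm_ptr(my_suspend),
	.resume  = pm_ptr(my_resume),
};

int main(void)
{
	if (ops.suspend)
		ops.suspend();
	else
		puts("PM disabled: no suspend callback");
	return 0;
}
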
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 84e7ba5314d3..d4af17fdba46 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -436,7 +436,6 @@ static void rmi_report(struct hid_device *hid, struct hid_report *report)
input_sync(field->hidinput->input);
}
-#ifdef CONFIG_PM
static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
{
struct rmi_data *data = hid_get_drvdata(hdev);
@@ -483,7 +482,6 @@ out:
hid_hw_close(hdev);
return ret;
}
-#endif /* CONFIG_PM */
static int rmi_hid_reset(struct rmi_transport_dev *xport, u16 reset_addr)
{
@@ -774,11 +772,9 @@ static struct hid_driver rmi_driver = {
.report = rmi_report,
.input_mapping = rmi_input_mapping,
.input_configured = rmi_input_configured,
-#ifdef CONFIG_PM
- .suspend = rmi_suspend,
- .resume = rmi_post_resume,
- .reset_resume = rmi_post_resume,
-#endif
+ .suspend = pm_ptr(rmi_suspend),
+ .resume = pm_ptr(rmi_post_resume),
+ .reset_resume = pm_ptr(rmi_post_resume),
};
module_hid_driver(rmi_driver);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 257dd73e37bf..a90ed2ceae84 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1562,7 +1562,6 @@ static int hid_post_reset(struct usb_interface *intf)
return 0;
}
-#ifdef CONFIG_PM
static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
{
int status = 0;
@@ -1654,8 +1653,6 @@ static int hid_reset_resume(struct usb_interface *intf)
return status;
}
-#endif /* CONFIG_PM */
-
static const struct usb_device_id hid_usb_ids[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
.bInterfaceClass = USB_INTERFACE_CLASS_HID },
@@ -1668,11 +1665,9 @@ static struct usb_driver hid_driver = {
.name = "usbhid",
.probe = usbhid_probe,
.disconnect = usbhid_disconnect,
-#ifdef CONFIG_PM
- .suspend = hid_suspend,
- .resume = hid_resume,
- .reset_resume = hid_reset_resume,
-#endif
+ .suspend = pm_ptr(hid_suspend),
+ .resume = pm_ptr(hid_resume),
+ .reset_resume = pm_ptr(hid_reset_resume),
.pre_reset = hid_pre_reset,
.post_reset = hid_post_reset,
.id_table = hid_usb_ids,
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index ebf15f31d97e..3cabeeabb1ca 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -98,6 +98,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
*/
if (version >= VERSION_WIN10_V5) {
msg->msg_sint = VMBUS_MESSAGE_SINT;
+ msg->msg_vtl = ms_hyperv.vtl;
vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID_4;
} else {
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
@@ -482,10 +483,17 @@ void vmbus_set_event(struct vmbus_channel *channel)
++channel->sig_events;
- if (hv_isolation_type_snp())
- hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
- NULL, sizeof(channel->sig_event));
- else
+ if (ms_hyperv.paravisor_present) {
+ if (hv_isolation_type_snp())
+ hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
+ NULL, sizeof(channel->sig_event));
+ else if (hv_isolation_type_tdx())
+ hv_tdx_hypercall(HVCALL_SIGNAL_EVENT | HV_HYPERCALL_FAST_BIT,
+ channel->sig_event, 0);
+ else
+ WARN_ON_ONCE(1);
+ } else {
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
+ }
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index de6708dbe0df..51e5018ac9b2 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
+#include <linux/set_memory.h>
#include "hyperv_vmbus.h"
/* The one and only */
@@ -56,20 +57,37 @@ int hv_post_message(union hv_connection_id connection_id,
local_irq_save(flags);
- aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ /*
+ * A TDX VM with the paravisor must use the decrypted post_msg_page: see
+ * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
+ * can use the encrypted hyperv_pcpu_input_arg because it copies the
+ * input into the GHCB page, which has been decrypted by the paravisor.
+ */
+ if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
+ aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
+ else
+ aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
aligned_msg->connectionid = connection_id;
aligned_msg->reserved = 0;
aligned_msg->message_type = message_type;
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
- if (hv_isolation_type_snp())
- status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
- (void *)aligned_msg, NULL,
- sizeof(*aligned_msg));
- else
+ if (ms_hyperv.paravisor_present) {
+ if (hv_isolation_type_tdx())
+ status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
+ virt_to_phys(aligned_msg), 0);
+ else if (hv_isolation_type_snp())
+ status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
+ aligned_msg, NULL,
+ sizeof(*aligned_msg));
+ else
+ status = HV_STATUS_INVALID_PARAMETER;
+ } else {
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
aligned_msg, NULL);
+ }
local_irq_restore(flags);
@@ -78,7 +96,7 @@ int hv_post_message(union hv_connection_id connection_id,
int hv_synic_alloc(void)
{
- int cpu;
+ int cpu, ret = -ENOMEM;
struct hv_per_cpu_context *hv_cpu;
/*
@@ -104,11 +122,29 @@ int hv_synic_alloc(void)
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
+ if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+ hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->post_msg_page == NULL) {
+ pr_err("Unable to allocate post msg page\n");
+ goto err;
+ }
+
+ ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
+ if (ret) {
+ pr_err("Failed to decrypt post msg page: %d\n", ret);
+			/* Just leak the page, as it's unsafe to free it. */
+ hv_cpu->post_msg_page = NULL;
+ goto err;
+ }
+
+ memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
+ }
+
/*
* Synic message and event pages are allocated by paravisor.
* Skip these pages allocation here.
*/
- if (!hv_isolation_type_snp() && !hv_root_partition) {
+ if (!ms_hyperv.paravisor_present && !hv_root_partition) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_message_page == NULL) {
@@ -120,29 +156,96 @@ int hv_synic_alloc(void)
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_event_page == NULL) {
pr_err("Unable to allocate SYNIC event page\n");
+
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ hv_cpu->synic_message_page = NULL;
goto err;
}
}
+
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+ ret = set_memory_decrypted((unsigned long)
+ hv_cpu->synic_message_page, 1);
+ if (ret) {
+ pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
+ hv_cpu->synic_message_page = NULL;
+
+ /*
+ * Free the event page here so that hv_synic_free()
+ * won't later try to re-encrypt it.
+ */
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ hv_cpu->synic_event_page = NULL;
+ goto err;
+ }
+
+ ret = set_memory_decrypted((unsigned long)
+ hv_cpu->synic_event_page, 1);
+ if (ret) {
+ pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
+ hv_cpu->synic_event_page = NULL;
+ goto err;
+ }
+
+ memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
+ memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
+ }
}
return 0;
+
err:
/*
* Any memory allocations that succeeded will be freed when
* the caller cleans up by calling hv_synic_free()
*/
- return -ENOMEM;
+ return ret;
}
void hv_synic_free(void)
{
- int cpu;
+ int cpu, ret;
for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
+ /* It's better to leak the page if the encryption fails. */
+ if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+ if (hv_cpu->post_msg_page) {
+ ret = set_memory_encrypted((unsigned long)
+ hv_cpu->post_msg_page, 1);
+ if (ret) {
+ pr_err("Failed to encrypt post msg page: %d\n", ret);
+ hv_cpu->post_msg_page = NULL;
+ }
+ }
+ }
+
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+ if (hv_cpu->synic_message_page) {
+ ret = set_memory_encrypted((unsigned long)
+ hv_cpu->synic_message_page, 1);
+ if (ret) {
+ pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
+ hv_cpu->synic_message_page = NULL;
+ }
+ }
+
+ if (hv_cpu->synic_event_page) {
+ ret = set_memory_encrypted((unsigned long)
+ hv_cpu->synic_event_page, 1);
+ if (ret) {
+ pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
+ hv_cpu->synic_event_page = NULL;
+ }
+ }
+ }
+
+ free_page((unsigned long)hv_cpu->post_msg_page);
free_page((unsigned long)hv_cpu->synic_event_page);
free_page((unsigned long)hv_cpu->synic_message_page);
}
@@ -170,7 +273,7 @@ void hv_synic_enable_regs(unsigned int cpu)
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -189,7 +292,7 @@ void hv_synic_enable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -272,7 +375,7 @@ void hv_synic_disable_regs(unsigned int cpu)
* addresses.
*/
simp.simp_enabled = 0;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
iounmap(hv_cpu->synic_message_page);
hv_cpu->synic_message_page = NULL;
} else {
@@ -284,7 +387,7 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
iounmap(hv_cpu->synic_event_page);
hv_cpu->synic_event_page = NULL;
} else {
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 0d7a3ba66396..e000fa3b9f97 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
@@ -646,7 +647,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
void *v)
{
struct memory_notify *mem = (struct memory_notify *)v;
- unsigned long flags, pfn_count;
+ unsigned long pfn_count;
switch (val) {
case MEM_ONLINE:
@@ -655,21 +656,22 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
break;
case MEM_OFFLINE:
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- pfn_count = hv_page_offline_check(mem->start_pfn,
- mem->nr_pages);
- if (pfn_count <= dm_device.num_pages_onlined) {
- dm_device.num_pages_onlined -= pfn_count;
- } else {
- /*
- * We're offlining more pages than we managed to online.
- * This is unexpected. In any case don't let
- * num_pages_onlined wrap around zero.
- */
- WARN_ON_ONCE(1);
- dm_device.num_pages_onlined = 0;
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ pfn_count = hv_page_offline_check(mem->start_pfn,
+ mem->nr_pages);
+ if (pfn_count <= dm_device.num_pages_onlined) {
+ dm_device.num_pages_onlined -= pfn_count;
+ } else {
+ /*
+ * We're offlining more pages than we
+ * managed to online. This is
+ * unexpected. In any case don't let
+ * num_pages_onlined wrap around zero.
+ */
+ WARN_ON_ONCE(1);
+ dm_device.num_pages_onlined = 0;
+ }
}
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
break;
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
@@ -721,24 +723,23 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
unsigned long start_pfn;
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
- unsigned long flags;
for (i = 0; i < (size/HA_CHUNK); i++) {
start_pfn = start + (i * HA_CHUNK);
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- has->ha_end_pfn += HA_CHUNK;
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ has->ha_end_pfn += HA_CHUNK;
- if (total_pfn > HA_CHUNK) {
- processed_pfn = HA_CHUNK;
- total_pfn -= HA_CHUNK;
- } else {
- processed_pfn = total_pfn;
- total_pfn = 0;
- }
+ if (total_pfn > HA_CHUNK) {
+ processed_pfn = HA_CHUNK;
+ total_pfn -= HA_CHUNK;
+ } else {
+ processed_pfn = total_pfn;
+ total_pfn = 0;
+ }
- has->covered_end_pfn += processed_pfn;
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ has->covered_end_pfn += processed_pfn;
+ }
reinit_completion(&dm_device.ol_waitevent);
@@ -758,10 +759,10 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
*/
do_hot_add = false;
}
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- has->ha_end_pfn -= HA_CHUNK;
- has->covered_end_pfn -= processed_pfn;
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ has->ha_end_pfn -= HA_CHUNK;
+ has->covered_end_pfn -= processed_pfn;
+ }
break;
}
@@ -781,10 +782,9 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
static void hv_online_page(struct page *pg, unsigned int order)
{
struct hv_hotadd_state *has;
- unsigned long flags;
unsigned long pfn = page_to_pfn(pg);
- spin_lock_irqsave(&dm_device.ha_lock, flags);
+ guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/* The page belongs to a different HAS. */
if ((pfn < has->start_pfn) ||
@@ -794,7 +794,6 @@ static void hv_online_page(struct page *pg, unsigned int order)
hv_bring_pgs_online(has, pfn, 1UL << order);
break;
}
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
@@ -803,9 +802,8 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
struct hv_hotadd_gap *gap;
unsigned long residual, new_inc;
int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&dm_device.ha_lock, flags);
+ guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
* If the pfn range we are dealing with is not in the current
@@ -852,7 +850,6 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
ret = 1;
break;
}
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
return ret;
}
@@ -947,7 +944,6 @@ static unsigned long process_hot_add(unsigned long pg_start,
{
struct hv_hotadd_state *ha_region = NULL;
int covered;
- unsigned long flags;
if (pfn_cnt == 0)
return 0;
@@ -979,9 +975,9 @@ static unsigned long process_hot_add(unsigned long pg_start,
ha_region->covered_end_pfn = pg_start;
ha_region->end_pfn = rg_start + rg_size;
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- list_add_tail(&ha_region->list, &dm_device.ha_region_list);
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+ }
}
do_pg_range:
@@ -2047,7 +2043,6 @@ static void balloon_remove(struct hv_device *dev)
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
struct hv_hotadd_state *has, *tmp;
struct hv_hotadd_gap *gap, *tmp_gap;
- unsigned long flags;
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -2073,7 +2068,7 @@ static void balloon_remove(struct hv_device *dev)
#endif
}
- spin_lock_irqsave(&dm_device.ha_lock, flags);
+ guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
list_del(&gap->list);
@@ -2082,7 +2077,6 @@ static void balloon_remove(struct hv_device *dev)
list_del(&has->list);
kfree(has);
}
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}
static int balloon_suspend(struct hv_device *hv_dev)
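
The hv_balloon conversion above replaces open-coded spin_lock_irqsave()/spin_unlock_irqrestore() pairs with the guard()/scoped_guard() helpers from <linux/cleanup.h>, which drop the lock automatically at end of scope. A minimal sketch of both forms, with illustrative names:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_counter;

static void demo_add(unsigned long n)
{
	/* Lock held only for this block; released when the scope ends. */
	scoped_guard(spinlock_irqsave, &demo_lock)
		demo_counter += n;
}

static unsigned long demo_read(void)
{
	/* Lock held until return, on every return path. */
	guard(spinlock_irqsave)(&demo_lock);
	return demo_counter;
}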
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 6a2258fef1fe..ccad7bca3fd3 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -24,6 +24,7 @@
#include <linux/kmsg_dump.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
+#include <linux/set_memory.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@@ -359,6 +360,8 @@ int hv_common_cpu_init(unsigned int cpu)
u64 msr_vp_index;
gfp_t flags;
int pgcount = hv_root_partition ? 2 : 1;
+ void *mem;
+ int ret;
/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
@@ -370,14 +373,41 @@ int hv_common_cpu_init(unsigned int cpu)
* allocated if this CPU was previously online and then taken offline
*/
if (!*inputarg) {
- *inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
- if (!(*inputarg))
+ mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
+ if (!mem)
return -ENOMEM;
if (hv_root_partition) {
outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
- *outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE;
+ *outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
+ }
+
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+ ret = set_memory_decrypted((unsigned long)mem, pgcount);
+ if (ret) {
+ /* It may be unsafe to free 'mem' */
+ return ret;
+ }
+
+ memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);
}
+
+ /*
+ * In a fully enlightened TDX/SNP VM with more than 64 VPs, if
+ * hyperv_pcpu_input_arg is not NULL, set_memory_decrypted() ->
+ * ... -> cpa_flush() -> ... -> __send_ipi_mask_ex() tries to
+ * use hyperv_pcpu_input_arg as the hypercall input page, which
+ * must be a decrypted page in such a VM, but the page is still
+ * encrypted before set_memory_decrypted() returns. Fix this by
+ * setting *inputarg after the above set_memory_decrypted(): if
+ * hyperv_pcpu_input_arg is NULL, __send_ipi_mask_ex() returns
+ * HV_STATUS_INVALID_PARAMETER immediately, and the function
+ * hv_send_ipi_mask() falls back to orig_apic.send_IPI_mask(),
+ * which may be slightly slower than the hypercall, but still
+ * works correctly in such a VM.
+ */
+ *inputarg = mem;
}
msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
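
The comment above describes a subtle ordering requirement: the per-CPU hypercall-argument pointer may only be published after the buffer has been decrypted and zeroed, because code running from inside set_memory_decrypted() may already consult that pointer and must see NULL until the buffer is usable. A minimal sketch of that decrypt-then-publish order, with hypothetical names:

#include <linux/errno.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/string.h>

static void *demo_hypercall_arg;	/* callers treat NULL as "not ready" */

static int demo_setup_hypercall_arg(void)
{
	void *mem = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!mem)
		return -ENOMEM;

	/* Share the buffer with the hypervisor first ... */
	if (set_memory_decrypted((unsigned long)mem, 1))
		return -EIO;	/* may be partially shared: do not free */

	memset(mem, 0x00, PAGE_SIZE);

	/* ... and only then let anyone see the pointer. */
	WRITE_ONCE(demo_hypercall_arg, mem);
	return 0;
}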
@@ -502,6 +532,12 @@ bool __weak hv_isolation_type_snp(void)
}
EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
+bool __weak hv_isolation_type_tdx(void)
+{
+ return false;
+}
+EXPORT_SYMBOL_GPL(hv_isolation_type_tdx);
+
void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
@@ -542,3 +578,9 @@ u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_s
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);
+
+u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
+{
+ return HV_STATUS_INVALID_PARAMETER;
+}
+EXPORT_SYMBOL_GPL(hv_tdx_hypercall);
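
The new hv_isolation_type_tdx()/hv_tdx_hypercall() stubs above use the kernel's __weak convention: common code supplies a default that reports "not supported", and an architecture can ship a strong definition of the same symbol that the linker prefers. A minimal sketch, with hypothetical names:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Default in common code. An architecture that supports the feature
 * defines a non-weak demo_feature_supported() in its own file and the
 * linker picks that strong definition over this one.
 */
bool __weak demo_feature_supported(void)
{
	return false;
}

static int demo_use_feature(void)
{
	if (!demo_feature_supported())
		return -EOPNOTSUPP;

	return 0;	/* feature-specific path would go here */
}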
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 55f2086841ae..f6b1e710f805 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -124,6 +124,17 @@ struct hv_per_cpu_context {
void *synic_event_page;
/*
+ * The page is only used in hv_post_message() for a TDX VM (with the
+ * paravisor) to post a message to Hyper-V: when such a VM calls
+ * HVCALL_POST_MESSAGE, it can't use the hyperv_pcpu_input_arg (which
+ * is encrypted in such a VM) as the hypercall input page, because
+ * the input page for HVCALL_POST_MESSAGE must be decrypted in such a
+ * VM, so post_msg_page (which is decrypted in hv_synic_alloc()) is
+ * introduced for this purpose. See hyperv_init() for more comments.
+ */
+ void *post_msg_page;
+
+ /*
* Starting with win8, we can take channel interrupts on any CPU;
* we will manage the tasklet that handles events messages on a per CPU
* basis.
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 67f95a29aeca..edbb38f6956b 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2287,7 +2287,8 @@ static int vmbus_acpi_add(struct platform_device *pdev)
* Some ancestor of the vmbus acpi device (Gen1 or Gen2
* firmware) is the VMOD that has the mmio ranges. Get that.
*/
- for (ancestor = acpi_dev_parent(device); ancestor;
+ for (ancestor = acpi_dev_parent(device);
+ ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
ancestor = acpi_dev_parent(ancestor)) {
result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
vmbus_walk_resources, NULL);
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 02a71244fc3b..b5b81bd83bb1 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -1910,6 +1910,10 @@ static umode_t nct6775_in_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
int in = index / 5; /* voltage index */
+ int nr = index % 5; /* attribute index */
+
+ if (nr == 1 && data->ALARM_BITS[in] == -1)
+ return 0;
if (!(data->have_in & BIT(in)))
return 0;
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index dfe82952671b..a9fd9ca45f2a 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -145,7 +145,7 @@ runtime_err:
return ret;
}
-static int omap_hwspinlock_remove(struct platform_device *pdev)
+static void omap_hwspinlock_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
int ret;
@@ -153,12 +153,10 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
ret = hwspin_lock_unregister(bank);
if (ret) {
dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
- return ret;
+ return;
}
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id omap_hwspinlock_of_match[] = {
@@ -171,7 +169,7 @@ MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
- .remove = omap_hwspinlock_remove,
+ .remove_new = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
.of_match_table = omap_hwspinlock_of_match,
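
The omap_hwspinlock change above is part of the platform-driver conversion from .remove (returning an int whose value the core ignored) to .remove_new (returning void), so errors are logged rather than returned. A minimal sketch of the converted shape, with hypothetical names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* Tear down; there is nothing useful to do with an error here. */
	dev_dbg(&pdev->dev, "removed\n");
}

static struct platform_driver demo_driver = {
	.probe		= demo_probe,
	.remove_new	= demo_remove,
	.driver = {
		.name	= "demo",
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");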
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 9cf186362ae2..a0fd67fd2934 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -69,9 +69,18 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
.unlock = qcom_hwspinlock_unlock,
};
+static const struct regmap_config sfpb_mutex_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x100,
+ .fast_io = true,
+};
+
static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
.offset = 0x4,
.stride = 0x4,
+ .regmap_config = &sfpb_mutex_config,
};
static const struct regmap_config tcsr_msm8226_mutex_config = {
@@ -197,6 +206,8 @@ static int qcom_hwspinlock_probe(struct platform_device *pdev)
bank->lock[i].priv = devm_regmap_field_alloc(&pdev->dev,
regmap, field);
+ if (IS_ERR(bank->lock[i].priv))
+ return PTR_ERR(bank->lock[i].priv);
}
return devm_hwspin_lock_register(&pdev->dev, bank, &qcom_hwspinlock_ops,
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 67845c0c9701..1edca1092f29 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -120,20 +120,18 @@ static int u8500_hsem_probe(struct platform_device *pdev)
pdata->base_id, num_locks);
}
-static int u8500_hsem_remove(struct platform_device *pdev)
+static void u8500_hsem_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
void __iomem *io_base = bank->lock[0].priv - HSEM_REGISTER_OFFSET;
/* clear all interrupts */
writel(0xFFFF, io_base + HSEM_ICRALL);
-
- return 0;
}
static struct platform_driver u8500_hsem_driver = {
.probe = u8500_hsem_probe,
- .remove = u8500_hsem_remove,
+ .remove_new = u8500_hsem_remove,
.driver = {
.name = "u8500_hsem",
},
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index c6d1a345ea6d..9388823bb0bb 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -72,7 +72,7 @@ config I2C_MUX
source "drivers/i2c/muxes/Kconfig"
config I2C_ATR
- tristate "I2C Address Translator (ATR) support"
+ tristate "I2C Address Translator (ATR) support" if COMPILE_TEST
help
Enable support for I2C Address Translator (ATR) chips.
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9cfe8fc509d7..6644eebedaf3 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1384,10 +1384,10 @@ config I2C_ICY
config I2C_MLXCPLD
tristate "Mellanox I2C driver"
- depends on X86_64 || COMPILE_TEST
+ depends on X86_64 || (ARM64 && ACPI) || COMPILE_TEST
help
This exposes the Mellanox platform I2C busses to the linux I2C layer
- for X86 based systems.
+ for X86 and ARM64/ACPI based systems.
Controller is implemented as CPLD logic.
This driver can also be built as a module. If so, the module will be
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index cc58feacd082..0231c5be6354 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -165,14 +165,15 @@ static int ali15x3_setup(struct pci_dev *ALI15X3_dev)
}
if(force_addr) {
+ int ret;
+
dev_info(&ALI15X3_dev->dev, "forcing ISA address 0x%04X\n",
ali15x3_smba);
- if (PCIBIOS_SUCCESSFUL != pci_write_config_word(ALI15X3_dev,
- SMBBA,
- ali15x3_smba))
+ ret = pci_write_config_word(ALI15X3_dev, SMBBA, ali15x3_smba);
+ if (ret != PCIBIOS_SUCCESSFUL)
goto error;
- if (PCIBIOS_SUCCESSFUL != pci_read_config_word(ALI15X3_dev,
- SMBBA, &a))
+ ret = pci_read_config_word(ALI15X3_dev, SMBBA, &a);
+ if (ret != PCIBIOS_SUCCESSFUL)
goto error;
if ((a & ~(ALI15X3_SMB_IOSIZE - 1)) != ali15x3_smba) {
/* make sure it works */
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 2e5acfeb76c8..5a416b39b818 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -698,13 +698,16 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
if (time_left == 0) {
/*
- * If timed out and bus is still busy in a multi master
- * environment, attempt recovery at here.
+ * In a multi-master setup, if a timeout occurs, attempt
+ * recovery. But if the bus is idle, we still need to reset the
+ * i2c controller to clear the remaining interrupts.
*/
if (bus->multi_master &&
(readl(bus->base + ASPEED_I2C_CMD_REG) &
ASPEED_I2CD_BUS_BUSY_STS))
aspeed_i2c_recover_bus(bus);
+ else
+ aspeed_i2c_reset(bus);
/*
* If timed out and the state is still pending, drop the pending
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index 05ad3bc3578a..db45554327ae 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
@@ -207,19 +206,15 @@ static int at91_twi_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENODEV;
+ dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
phy_addr = mem->start;
dev->pdata = at91_twi_get_driver_data(pdev);
if (!dev->pdata)
return -ENODEV;
- dev->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(dev->base))
- return PTR_ERR(dev->base);
-
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0)
return dev->irq;
@@ -227,10 +222,9 @@ static int at91_twi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
dev->clk = devm_clk_get(dev->dev, NULL);
- if (IS_ERR(dev->clk)) {
- dev_err(dev->dev, "no clock defined\n");
- return -ENODEV;
- }
+ if (IS_ERR(dev->clk))
+ return dev_err_probe(dev->dev, PTR_ERR(dev->clk), "no clock defined\n");
+
clk_prepare_enable(dev->clk);
snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
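
The at91 probe rework above uses two idioms that recur throughout this series: devm_platform_get_and_ioremap_resource() collapses the platform_get_resource()/devm_ioremap_resource() pair, and dev_err_probe() logs (quietly for -EPROBE_DEFER) and returns the error in one step. A minimal sketch, with hypothetical names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_io_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;
	struct clk *clk;

	/* Look up, request and map the first MEM resource in one call. */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "no clock defined\n");

	/* mem->start is still available if the physical address is needed. */
	return 0;
}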
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index c0c35785a0dc..d311981d3e60 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -832,7 +831,11 @@ static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (!rinfo->pinctrl || IS_ERR(rinfo->pinctrl)) {
+ if (!rinfo->pinctrl) {
+ dev_info(dev->dev, "pinctrl unavailable, bus recovery not supported\n");
+ return 0;
+ }
+ if (IS_ERR(rinfo->pinctrl)) {
dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
return PTR_ERR(rinfo->pinctrl);
}
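
The split above matters because devm_pinctrl_get() returns NULL (not an ERR_PTR) when pinctrl support is compiled out, so the NULL case means "feature unavailable" rather than a probe failure. A minimal sketch of the check, with hypothetical names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int demo_get_recovery_pinctrl(struct device *dev, struct pinctrl **out)
{
	struct pinctrl *p = devm_pinctrl_get(dev);

	if (!p) {
		/* pinctrl compiled out: not an error, just no recovery. */
		dev_info(dev, "pinctrl unavailable, bus recovery not supported\n");
		*out = NULL;
		return 0;
	}
	if (IS_ERR(p))
		return PTR_ERR(p);	/* may be -EPROBE_DEFER */

	*out = p;
	return 0;
}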
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index e66c12ecf270..8e43f25c117e 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -342,7 +342,6 @@ static void i2c_au1550_remove(struct platform_device *pdev)
i2c_au1550_disable(priv);
}
-#ifdef CONFIG_PM
static int i2c_au1550_suspend(struct device *dev)
{
struct i2c_au1550_data *priv = dev_get_drvdata(dev);
@@ -361,21 +360,13 @@ static int i2c_au1550_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops i2c_au1550_pmops = {
- .suspend = i2c_au1550_suspend,
- .resume = i2c_au1550_resume,
-};
-
-#define AU1XPSC_SMBUS_PMOPS (&i2c_au1550_pmops)
-
-#else
-#define AU1XPSC_SMBUS_PMOPS NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(i2c_au1550_pmops,
+ i2c_au1550_suspend, i2c_au1550_resume);
static struct platform_driver au1xpsc_smbus_driver = {
.driver = {
.name = "au1xpsc_smbus",
- .pm = AU1XPSC_SMBUS_PMOPS,
+ .pm = pm_sleep_ptr(&i2c_au1550_pmops),
},
.probe = i2c_au1550_probe,
.remove_new = i2c_au1550_remove,
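
The i2c-au1550 change above shows the modern PM-ops pattern used across this series: DEFINE_SIMPLE_DEV_PM_OPS() builds the dev_pm_ops table without #ifdef CONFIG_PM, and pm_sleep_ptr() hands the core NULL when sleep support is not configured while still referencing the callbacks, so no __maybe_unused is needed. A minimal sketch, with hypothetical names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_pm_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware */
}

static int demo_pm_resume(struct device *dev)
{
	return 0;	/* re-initialise the hardware */
}

static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_pm_suspend, demo_pm_resume);

static struct platform_driver demo_pm_driver = {
	.driver = {
		.name	= "demo-pm",
		.pm	= pm_sleep_ptr(&demo_pm_ops),
	},
};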
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 05c80680dff4..51aab662050b 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -7,7 +7,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -1029,7 +1029,6 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
int irq, ret = 0;
struct bcm_iproc_i2c_dev *iproc_i2c;
struct i2c_adapter *adap;
- struct resource *res;
iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
GFP_KERNEL);
@@ -1042,15 +1041,12 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
(enum bcm_iproc_i2c_type)of_device_get_match_data(&pdev->dev);
init_completion(&iproc_i2c->done);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res);
+ iproc_i2c->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iproc_i2c->base))
return PTR_ERR(iproc_i2c->base);
if (iproc_i2c->type == IPROC_I2C_NIC) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- iproc_i2c->idm_base = devm_ioremap_resource(iproc_i2c->device,
- res);
+ iproc_i2c->idm_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(iproc_i2c->idm_base))
return PTR_ERR(iproc_i2c->idm_base);
@@ -1128,8 +1124,6 @@ static void bcm_iproc_i2c_remove(struct platform_device *pdev)
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
}
-#ifdef CONFIG_PM_SLEEP
-
static int bcm_iproc_i2c_suspend(struct device *dev)
{
struct bcm_iproc_i2c_dev *iproc_i2c = dev_get_drvdata(dev);
@@ -1180,12 +1174,6 @@ static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = {
.resume_early = &bcm_iproc_i2c_resume
};
-#define BCM_IPROC_I2C_PM_OPS (&bcm_iproc_i2c_pm_ops)
-#else
-#define BCM_IPROC_I2C_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
-
-
static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
{
struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
@@ -1258,7 +1246,7 @@ static struct platform_driver bcm_iproc_i2c_driver = {
.driver = {
.name = "bcm-iproc-i2c",
.of_match_table = bcm_iproc_i2c_of_match,
- .pm = BCM_IPROC_I2C_PM_OPS,
+ .pm = pm_sleep_ptr(&bcm_iproc_i2c_pm_ops),
},
.probe = bcm_iproc_i2c_probe,
.remove_new = bcm_iproc_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 8ce6d3f49551..b92de1944221 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -430,10 +430,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
- if (IS_ERR(i2c_dev->bus_clk)) {
- dev_err(&pdev->dev, "Could not register clock\n");
- return PTR_ERR(i2c_dev->bus_clk);
- }
+ if (IS_ERR(i2c_dev->bus_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c_dev->bus_clk),
+ "Could not register clock\n");
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&bus_clk_rate);
@@ -444,10 +443,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
}
ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not set clock frequency\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Could not set clock frequency\n");
ret = clk_prepare_enable(i2c_dev->bus_clk);
if (ret) {
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index cf92cbcb8c86..acee76732544 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -594,11 +594,10 @@ static int bcm2711_release_bsc(struct brcmstb_i2c_dev *dev)
static int brcmstb_i2c_probe(struct platform_device *pdev)
{
- int rc = 0;
struct brcmstb_i2c_dev *dev;
struct i2c_adapter *adap;
- struct resource *iomem;
const char *int_name;
+ int rc;
/* Allocate memory for private data structure */
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -614,18 +613,15 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
init_completion(&dev->done);
/* Map hardware registers */
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(dev->device, iomem);
- if (IS_ERR(dev->base)) {
- rc = -ENOMEM;
- goto probe_errorout;
- }
+ dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
if (of_device_is_compatible(dev->device->of_node,
"brcm,bcm2711-hdmi-i2c")) {
rc = bcm2711_release_bsc(dev);
if (rc)
- goto probe_errorout;
+ return rc;
}
rc = of_property_read_string(dev->device->of_node, "interrupt-names",
@@ -678,16 +674,13 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
rc = i2c_add_adapter(adap);
if (rc)
- goto probe_errorout;
+ return rc;
dev_info(dev->device, "%s@%dhz registered in %s mode\n",
int_name ? int_name : " ", dev->clk_freq_hz,
(dev->irq >= 0) ? "interrupt" : "polling");
return 0;
-
-probe_errorout:
- return rc;
}
static void brcmstb_i2c_remove(struct platform_device *pdev)
@@ -697,7 +690,6 @@ static void brcmstb_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
}
-#ifdef CONFIG_PM_SLEEP
static int brcmstb_i2c_suspend(struct device *dev)
{
struct brcmstb_i2c_dev *i2c_dev = dev_get_drvdata(dev);
@@ -715,10 +707,9 @@ static int brcmstb_i2c_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(brcmstb_i2c_pm, brcmstb_i2c_suspend,
- brcmstb_i2c_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_i2c_pm, brcmstb_i2c_suspend,
+ brcmstb_i2c_resume);
static const struct of_device_id brcmstb_i2c_of_match[] = {
{.compatible = "brcm,brcmstb-i2c"},
@@ -732,7 +723,7 @@ static struct platform_driver brcmstb_i2c_driver = {
.driver = {
.name = "brcmstb-i2c",
.of_match_table = brcmstb_i2c_of_match,
- .pm = &brcmstb_i2c_pm,
+ .pm = pm_sleep_ptr(&brcmstb_i2c_pm),
},
.probe = brcmstb_i2c_probe,
.remove_new = brcmstb_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 9849f4502570..de3f58b60dce 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -182,6 +182,7 @@ enum cdns_i2c_slave_state {
* @reset: Reset control for the device
* @quirks: flag for broken hold bit usage in r1p10
* @ctrl_reg: Cached value of the control register.
+ * @rinfo: I2C GPIO recovery information
* @ctrl_reg_diva_divb: value of fields DIV_A and DIV_B from CR register
* @slave: Registered slave instance.
* @dev_mode: I2C operating role(master/slave).
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 732daf6a932b..9a664abf734d 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -26,10 +26,10 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 71b60778c643..02b3b1160fb0 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_data/i2c-davinci.h>
#include <linux/pm_runtime.h>
@@ -765,7 +765,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return dev_err_probe(&pdev->dev, irq, "can't get irq resource\n");
+ return irq;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -902,7 +902,6 @@ static void davinci_i2c_remove(struct platform_device *pdev)
pm_runtime_disable(dev->dev);
}
-#ifdef CONFIG_PM
static int davinci_i2c_suspend(struct device *dev)
{
struct davinci_i2c_dev *i2c_dev = dev_get_drvdata(dev);
@@ -926,15 +925,10 @@ static int davinci_i2c_resume(struct device *dev)
static const struct dev_pm_ops davinci_i2c_pm = {
.suspend = davinci_i2c_suspend,
.resume = davinci_i2c_resume,
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
-#define davinci_i2c_pm_ops (&davinci_i2c_pm)
-#else
-#define davinci_i2c_pm_ops NULL
-#endif
-
static const struct platform_device_id davinci_i2c_driver_ids[] = {
{ .name = "i2c_davinci", },
{ /* sentinel */ }
@@ -947,7 +941,7 @@ static struct platform_driver davinci_i2c_driver = {
.id_table = davinci_i2c_driver_ids,
.driver = {
.name = "i2c_davinci",
- .pm = davinci_i2c_pm_ops,
+ .pm = pm_sleep_ptr(&davinci_i2c_pm),
.of_match_table = davinci_i2c_of_match,
},
};
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index cdd8c67d9129..affcfb243f0f 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -441,8 +441,25 @@ err_release_lock:
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
+ unsigned int raw_intr_stats;
+ unsigned int enable;
int timeout = 100;
+ bool abort_needed;
unsigned int status;
+ int ret;
+
+ regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats);
+ regmap_read(dev->map, DW_IC_ENABLE, &enable);
+
+ abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
+ if (abort_needed) {
+ regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT);
+ ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable,
+ !(enable & DW_IC_ENABLE_ABORT), 10,
+ 100);
+ if (ret)
+ dev_err(dev->dev, "timeout while trying to abort current transfer\n");
+ }
do {
__i2c_dw_disable_nowait(dev);
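
The __i2c_dw_disable() change above issues an ABORT before disabling the controller and then waits, bounded by regmap_read_poll_timeout(), for the hardware to clear the bit. A minimal sketch of that poll pattern, with a hypothetical register layout (not the DesignWare one):

#include <linux/bits.h>
#include <linux/regmap.h>

#define DEMO_REG_ENABLE		0x10
#define DEMO_ENABLE_ABORT	BIT(1)

static int demo_abort_transfer(struct regmap *map)
{
	unsigned int enable;
	int ret;

	ret = regmap_read(map, DEMO_REG_ENABLE, &enable);
	if (ret)
		return ret;

	ret = regmap_write(map, DEMO_REG_ENABLE, enable | DEMO_ENABLE_ABORT);
	if (ret)
		return ret;

	/* Poll every 10 us, give up after 100 us, as in the change above. */
	return regmap_read_poll_timeout(map, DEMO_REG_ENABLE, enable,
					!(enable & DEMO_ENABLE_ABORT),
					10, 100);
}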
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index cf4f684f5356..a7f6f3eafad7 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -98,6 +98,7 @@
#define DW_IC_INTR_START_DET BIT(10)
#define DW_IC_INTR_GEN_CALL BIT(11)
#define DW_IC_INTR_RESTART_DET BIT(12)
+#define DW_IC_INTR_MST_ON_HOLD BIT(13)
#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \
DW_IC_INTR_TX_ABRT | \
@@ -108,6 +109,8 @@
DW_IC_INTR_RX_UNDER | \
DW_IC_INTR_RD_REQ)
+#define DW_IC_ENABLE_ABORT BIT(1)
+
#define DW_IC_STATUS_ACTIVITY BIT(0)
#define DW_IC_STATUS_TFE BIT(2)
#define DW_IC_STATUS_RFNE BIT(3)
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 24bef0025c98..ca1035e010c7 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -917,6 +918,17 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
return PTR_ERR(gpio);
rinfo->sda_gpiod = gpio;
+ rinfo->pinctrl = devm_pinctrl_get(dev->dev);
+ if (IS_ERR(rinfo->pinctrl)) {
+ if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
+ return PTR_ERR(rinfo->pinctrl);
+
+ rinfo->pinctrl = NULL;
+ dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
+ } else if (!rinfo->pinctrl) {
+ dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
+ }
+
rinfo->recover_bus = i2c_generic_scl_recovery;
rinfo->prepare_recovery = i2c_dw_prepare_recovery;
rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 970c1c3b0402..855b698e99c0 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -418,7 +418,6 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
reset_control_assert(dev->rst);
}
-#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_prepare(struct device *dev)
{
/*
@@ -429,11 +428,7 @@ static int dw_i2c_plat_prepare(struct device *dev)
*/
return !has_acpi_companion(dev);
}
-#else
-#define dw_i2c_plat_prepare NULL
-#endif
-#ifdef CONFIG_PM
static int dw_i2c_plat_runtime_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
@@ -447,7 +442,7 @@ static int dw_i2c_plat_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
@@ -468,7 +463,7 @@ static int dw_i2c_plat_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused dw_i2c_plat_resume(struct device *dev)
+static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
@@ -479,16 +474,11 @@ static int __maybe_unused dw_i2c_plat_resume(struct device *dev)
}
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
- .prepare = dw_i2c_plat_prepare,
- SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
+ .prepare = pm_sleep_ptr(dw_i2c_plat_prepare),
+ LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+ RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
};
-#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
-#else
-#define DW_I2C_DEV_PMOPS NULL
-#endif
-
/* Work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
@@ -499,7 +489,7 @@ static struct platform_driver dw_i2c_driver = {
.name = "i2c_designware",
.of_match_table = of_match_ptr(dw_i2c_of_match),
.acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
- .pm = DW_I2C_DEV_PMOPS,
+ .pm = pm_ptr(&dw_i2c_dev_pm_ops),
},
};
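
The designware-platdrv change above combines runtime-PM and system-sleep callbacks in one table using the non-SET_ initialisers and gates the whole table with pm_ptr(), removing the #ifdef CONFIG_PM scaffolding. A minimal sketch of that combined form, with hypothetical names:

#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int demo_runtime_suspend(struct device *dev)
{
	return 0;	/* gate clocks, drop power */
}

static int demo_runtime_resume(struct device *dev)
{
	return 0;	/* restore clocks and context */
}

static int demo_sleep_suspend(struct device *dev)
{
	return pm_runtime_force_suspend(dev);
}

static int demo_sleep_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops demo_dev_pm_ops = {
	LATE_SYSTEM_SLEEP_PM_OPS(demo_sleep_suspend, demo_sleep_resume)
	RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};

static struct platform_driver demo_rt_driver = {
	.driver = {
		.name	= "demo-rt",
		.pm	= pm_ptr(&demo_dev_pm_ops),
	},
};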
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index 4f02cc2fb567..631109c7a098 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -218,10 +218,8 @@ static int dln2_i2c_probe(struct platform_device *pdev)
/* initialize the i2c interface */
ret = dln2_i2c_enable(dln2, true);
- if (ret < 0) {
- dev_err(dev, "failed to initialize adapter: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to initialize adapter\n");
/* and finally attach to i2c layer */
ret = i2c_add_adapter(&dln2->adapter);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 4ba93cd91c0f..557409410445 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index f378cd479e55..2b0b9cdffa86 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -18,9 +18,7 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/spinlock.h>
/*
@@ -892,7 +890,6 @@ static void exynos5_i2c_remove(struct platform_device *pdev)
clk_unprepare(i2c->pclk);
}
-#ifdef CONFIG_PM_SLEEP
static int exynos5_i2c_suspend_noirq(struct device *dev)
{
struct exynos5_i2c *i2c = dev_get_drvdata(dev);
@@ -934,11 +931,10 @@ err_pclk:
clk_disable_unprepare(i2c->pclk);
return ret;
}
-#endif
static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos5_i2c_suspend_noirq,
- exynos5_i2c_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos5_i2c_suspend_noirq,
+ exynos5_i2c_resume_noirq)
};
static struct platform_driver exynos5_i2c_driver = {
@@ -946,7 +942,7 @@ static struct platform_driver exynos5_i2c_driver = {
.remove_new = exynos5_i2c_remove,
.driver = {
.name = "exynos5-hsi2c",
- .pm = &exynos5_i2c_dev_pm_ops,
+ .pm = pm_sleep_ptr(&exynos5_i2c_dev_pm_ops),
.of_match_table = exynos5_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-gxp.c b/drivers/i2c/busses/i2c-gxp.c
index 70b0de07ed99..efafc0528c44 100644
--- a/drivers/i2c/busses/i2c-gxp.c
+++ b/drivers/i2c/busses/i2c-gxp.c
@@ -4,8 +4,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
index 0980c773cb5b..dfad5bad5075 100644
--- a/drivers/i2c/busses/i2c-hisi.c
+++ b/drivers/i2c/busses/i2c-hisi.c
@@ -470,18 +470,14 @@ static int hisi_i2c_probe(struct platform_device *pdev)
hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
ret = devm_request_irq(dev, ctlr->irq, hisi_i2c_irq, 0, "hisi-i2c", ctlr);
- if (ret) {
- dev_err(dev, "failed to request irq handler, ret = %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq handler\n");
ctlr->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR_OR_NULL(ctlr->clk)) {
ret = device_property_read_u64(dev, "clk_rate", &clk_rate_hz);
- if (ret) {
- dev_err(dev, "failed to get clock frequency, ret = %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get clock frequency\n");
} else {
clk_rate_hz = clk_get_rate(ctlr->clk);
}
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 784a5f56eb76..8e75515c3ca4 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -475,7 +475,6 @@ static void hix5hd2_i2c_remove(struct platform_device *pdev)
pm_runtime_set_suspended(priv->dev);
}
-#ifdef CONFIG_PM
static int hix5hd2_i2c_runtime_suspend(struct device *dev)
{
struct hix5hd2_i2c_priv *priv = dev_get_drvdata(dev);
@@ -494,12 +493,11 @@ static int hix5hd2_i2c_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops hix5hd2_i2c_pm_ops = {
- SET_RUNTIME_PM_OPS(hix5hd2_i2c_runtime_suspend,
- hix5hd2_i2c_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(hix5hd2_i2c_runtime_suspend,
+ hix5hd2_i2c_runtime_resume,
+ NULL)
};
static const struct of_device_id hix5hd2_i2c_match[] = {
@@ -513,7 +511,7 @@ static struct platform_driver hix5hd2_i2c_driver = {
.remove_new = hix5hd2_i2c_remove,
.driver = {
.name = "hix5hd2-i2c",
- .pm = &hix5hd2_i2c_pm_ops,
+ .pm = pm_ptr(&hix5hd2_i2c_pm_ops),
.of_match_table = hix5hd2_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 943b8e6d026d..1d855258a45d 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1754,6 +1754,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"SMBus I801 adapter at %04lx", priv->smba);
err = i2c_add_adapter(&priv->adapter);
if (err) {
+ platform_device_unregister(priv->tco_pdev);
i801_acpi_remove(priv);
return err;
}
@@ -1808,7 +1809,6 @@ static void i801_shutdown(struct pci_dev *dev)
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
}
-#ifdef CONFIG_PM_SLEEP
static int i801_suspend(struct device *dev)
{
struct i801_priv *priv = dev_get_drvdata(dev);
@@ -1827,9 +1827,8 @@ static int i801_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(i801_pm_ops, i801_suspend, i801_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(i801_pm_ops, i801_suspend, i801_resume);
static struct pci_driver i801_driver = {
.name = DRV_NAME,
@@ -1838,7 +1837,7 @@ static struct pci_driver i801_driver = {
.remove = i801_remove,
.shutdown = i801_shutdown,
.driver = {
- .pm = &i801_pm_ops,
+ .pm = pm_sleep_ptr(&i801_pm_ops),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 1ad9d3b26dd3..408820319ec4 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -37,9 +37,10 @@
#include <asm/irq.h>
#include <linux/io.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include "i2c-ibm_iic.h"
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 66ba36949ab5..f9d4bfef511c 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1454,7 +1454,6 @@ static int img_i2c_runtime_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int img_i2c_suspend(struct device *dev)
{
struct img_i2c *i2c = dev_get_drvdata(dev);
@@ -1482,13 +1481,10 @@ static int img_i2c_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops img_i2c_pm = {
- SET_RUNTIME_PM_OPS(img_i2c_runtime_suspend,
- img_i2c_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(img_i2c_suspend, img_i2c_resume)
+ RUNTIME_PM_OPS(img_i2c_runtime_suspend, img_i2c_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(img_i2c_suspend, img_i2c_resume)
};
static const struct of_device_id img_scb_i2c_match[] = {
@@ -1501,7 +1497,7 @@ static struct platform_driver img_scb_i2c_driver = {
.driver = {
.name = "img-i2c-scb",
.of_match_table = img_scb_i2c_match,
- .pm = &img_i2c_pm,
+ .pm = pm_ptr(&img_i2c_pm),
},
.probe = img_i2c_probe,
.remove_new = img_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 150d923ca7f1..678b30e90492 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -517,14 +516,12 @@ static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
temp = readl(lpi2c_imx->base + LPI2C_MSR);
temp &= enabled;
- if (temp & MSR_RDF)
- lpi2c_imx_read_rxfifo(lpi2c_imx);
-
- if (temp & MSR_TDF)
- lpi2c_imx_write_txfifo(lpi2c_imx);
-
if (temp & MSR_NDF)
complete(&lpi2c_imx->complete);
+ else if (temp & MSR_RDF)
+ lpi2c_imx_read_rxfifo(lpi2c_imx);
+ else if (temp & MSR_TDF)
+ lpi2c_imx_write_txfifo(lpi2c_imx);
return IRQ_HANDLED;
}
@@ -572,10 +569,8 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
sizeof(lpi2c_imx->adapter.name));
ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get I2C peripheral clock, ret=%d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "can't get I2C peripheral clock\n");
lpi2c_imx->num_clks = ret;
ret = of_property_read_u32(pdev->dev.of_node,
@@ -585,10 +580,8 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
pdev->name, lpi2c_imx);
- if (ret) {
- dev_err(&pdev->dev, "can't claim irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", irq);
i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
platform_set_drvdata(pdev, lpi2c_imx);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 65128a73e8a3..1775a79aeba2 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -41,7 +41,6 @@
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/i2c-imx.h>
@@ -1389,7 +1388,11 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
struct i2c_bus_recovery_info *rinfo = &i2c_imx->rinfo;
i2c_imx->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (!i2c_imx->pinctrl || IS_ERR(i2c_imx->pinctrl)) {
+ if (!i2c_imx->pinctrl) {
+ dev_info(&pdev->dev, "pinctrl unavailable, bus recovery not supported\n");
+ return 0;
+ }
+ if (IS_ERR(i2c_imx->pinctrl)) {
dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n");
return PTR_ERR(i2c_imx->pinctrl);
}
@@ -1506,8 +1509,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
goto rpm_disable;
/* Request IRQ */
- ret = request_threaded_irq(irq, i2c_imx_isr, NULL, IRQF_SHARED,
- pdev->name, i2c_imx);
+ ret = request_irq(irq, i2c_imx_isr, IRQF_SHARED, pdev->name, i2c_imx);
if (ret) {
dev_err(&pdev->dev, "can't claim irq %d\n", irq);
goto rpm_disable;
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 0dfe60399521..55035cca0ae5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -18,7 +18,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index 281058e3ea46..e01d75308288 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -350,10 +350,9 @@ static void kempld_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
}
-#ifdef CONFIG_PM
-static int kempld_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+static int kempld_i2c_suspend(struct device *dev)
{
- struct kempld_i2c_data *i2c = platform_get_drvdata(pdev);
+ struct kempld_i2c_data *i2c = dev_get_drvdata(dev);
struct kempld_device_data *pld = i2c->pld;
u8 ctrl;
@@ -366,9 +365,9 @@ static int kempld_i2c_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int kempld_i2c_resume(struct platform_device *pdev)
+static int kempld_i2c_resume(struct device *dev)
{
- struct kempld_i2c_data *i2c = platform_get_drvdata(pdev);
+ struct kempld_i2c_data *i2c = dev_get_drvdata(dev);
struct kempld_device_data *pld = i2c->pld;
kempld_get_mutex(pld);
@@ -377,19 +376,17 @@ static int kempld_i2c_resume(struct platform_device *pdev)
return 0;
}
-#else
-#define kempld_i2c_suspend NULL
-#define kempld_i2c_resume NULL
-#endif
+
+static DEFINE_SIMPLE_DEV_PM_OPS(kempld_i2c_pm_ops,
+ kempld_i2c_suspend, kempld_i2c_resume);
static struct platform_driver kempld_i2c_driver = {
.driver = {
.name = "kempld-i2c",
+ .pm = pm_sleep_ptr(&kempld_i2c_pm_ops),
},
.probe = kempld_i2c_probe,
.remove_new = kempld_i2c_remove,
- .suspend = kempld_i2c_suspend,
- .resume = kempld_i2c_resume,
};
module_platform_driver(kempld_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 5c6d96554753..e3660333e91c 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/time.h>
@@ -431,7 +430,6 @@ static void i2c_lpc2k_remove(struct platform_device *dev)
i2c_del_adapter(&i2c->adap);
}
-#ifdef CONFIG_PM
static int i2c_lpc2k_suspend(struct device *dev)
{
struct lpc2k_i2c *i2c = dev_get_drvdata(dev);
@@ -456,11 +454,6 @@ static const struct dev_pm_ops i2c_lpc2k_dev_pm_ops = {
.resume_noirq = i2c_lpc2k_resume,
};
-#define I2C_LPC2K_DEV_PM_OPS (&i2c_lpc2k_dev_pm_ops)
-#else
-#define I2C_LPC2K_DEV_PM_OPS NULL
-#endif
-
static const struct of_device_id lpc2k_i2c_match[] = {
{ .compatible = "nxp,lpc1788-i2c" },
{},
@@ -472,7 +465,7 @@ static struct platform_driver i2c_lpc2k_driver = {
.remove_new = i2c_lpc2k_remove,
.driver = {
.name = "lpc2k-i2c",
- .pm = I2C_LPC2K_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&i2c_lpc2k_dev_pm_ops),
.of_match_table = lpc2k_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 16026c895bb6..c7b203cc4434 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 7f58f7eaabb6..0b0a1c4d17ca 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -378,9 +378,8 @@ static int mchp_corei2c_probe(struct platform_device *pdev)
return PTR_ERR(idev->base);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return dev_err_probe(&pdev->dev, -ENXIO,
- "invalid IRQ %d for I2C controller\n", irq);
+ if (irq < 0)
+ return irq;
idev->i2c_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(idev->i2c_clk))
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index ae66bdd1b737..b3a73921ab69 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
@@ -1080,13 +1080,7 @@ static int mlxbf_i2c_init_resource(struct platform_device *pdev,
if (!tmp_res)
return -ENOMEM;
- tmp_res->params = platform_get_resource(pdev, IORESOURCE_MEM, type);
- if (!tmp_res->params) {
- devm_kfree(dev, tmp_res);
- return -EIO;
- }
-
- tmp_res->io = devm_ioremap_resource(dev, tmp_res->params);
+ tmp_res->io = devm_platform_get_and_ioremap_resource(pdev, type, &tmp_res->params);
if (IS_ERR(tmp_res->io)) {
devm_kfree(dev, tmp_res);
return PTR_ERR(tmp_res->io);
@@ -2323,10 +2317,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
ret = mlxbf_i2c_init_resource(pdev, &priv->smbus,
MLXBF_I2C_SMBUS_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch smbus resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch smbus resource info");
priv->timer->io = priv->smbus->io;
priv->mst->io = priv->smbus->io + MLXBF_I2C_MST_ADDR_OFFSET;
@@ -2334,39 +2326,29 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
} else {
ret = mlxbf_i2c_init_resource(pdev, &priv->timer,
MLXBF_I2C_SMBUS_TIMER_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch timer resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch timer resource info");
ret = mlxbf_i2c_init_resource(pdev, &priv->mst,
MLXBF_I2C_SMBUS_MST_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch master resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch master resource info");
ret = mlxbf_i2c_init_resource(pdev, &priv->slv,
MLXBF_I2C_SMBUS_SLV_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch slave resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch slave resource info");
}
ret = mlxbf_i2c_init_resource(pdev, &priv->mst_cause,
MLXBF_I2C_MST_CAUSE_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch cause master resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch cause master resource info");
ret = mlxbf_i2c_init_resource(pdev, &priv->slv_cause,
MLXBF_I2C_SLV_CAUSE_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch cause slave resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch cause slave resource info");
adap = &priv->adap;
adap->owner = THIS_MODULE;
@@ -2397,11 +2379,9 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
* does not really hurt, then keep the code as is.
*/
ret = mlxbf_i2c_init_master(pdev, priv);
- if (ret < 0) {
- dev_err(dev, "failed to initialize smbus master %d",
- priv->bus);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to initialize smbus master %d",
+ priv->bus);
mlxbf_i2c_init_timings(pdev, priv);
@@ -2413,10 +2393,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, mlxbf_i2c_irq,
IRQF_SHARED | IRQF_PROBE_SHARED,
dev_name(dev), priv);
- if (ret < 0) {
- dev_err(dev, "Cannot get irq %d\n", irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot get irq %d\n", irq);
priv->irq = irq;
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index c42fd4b329e4..6fec64ea67fb 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -22,6 +22,7 @@
#define MLXCPLD_I2C_BUS_NUM 1
#define MLXCPLD_I2C_DATA_REG_SZ 36
#define MLXCPLD_I2C_DATA_SZ_BIT BIT(5)
+#define MLXCPLD_I2C_DATA_EXT2_SZ_BIT BIT(6)
#define MLXCPLD_I2C_DATA_SZ_MASK GENMASK(6, 5)
#define MLXCPLD_I2C_SMBUS_BLK_BIT BIT(7)
#define MLXCPLD_I2C_MAX_ADDR_LEN 4
@@ -466,6 +467,13 @@ static const struct i2c_adapter_quirks mlxcpld_i2c_quirks_ext = {
.max_comb_1st_msg_len = 4,
};
+static const struct i2c_adapter_quirks mlxcpld_i2c_quirks_ext2 = {
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .max_read_len = (MLXCPLD_I2C_DATA_REG_SZ - 4) * 4,
+ .max_write_len = (MLXCPLD_I2C_DATA_REG_SZ - 4) * 4 + MLXCPLD_I2C_MAX_ADDR_LEN,
+ .max_comb_1st_msg_len = 4,
+};
+
static struct i2c_adapter mlxcpld_i2c_adapter = {
.owner = THIS_MODULE,
.name = "i2c-mlxcpld",
@@ -547,6 +555,8 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
/* Check support for extended transaction length */
if ((val & MLXCPLD_I2C_DATA_SZ_MASK) == MLXCPLD_I2C_DATA_SZ_BIT)
mlxcpld_i2c_adapter.quirks = &mlxcpld_i2c_quirks_ext;
+ else if ((val & MLXCPLD_I2C_DATA_SZ_MASK) == MLXCPLD_I2C_DATA_EXT2_SZ_BIT)
+ mlxcpld_i2c_adapter.quirks = &mlxcpld_i2c_quirks_ext2;
/* Check support for smbus block transaction */
if (val & MLXCPLD_I2C_SMBUS_BLK_BIT)
priv->smbus_block = true;
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index f460a7fb4eae..e4e4995ab224 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -11,9 +11,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 7ca3f2221ba6..1a9b5a068ef1 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -19,9 +19,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
@@ -1514,7 +1512,6 @@ static void mtk_i2c_remove(struct platform_device *pdev)
clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
}
-#ifdef CONFIG_PM_SLEEP
static int mtk_i2c_suspend_noirq(struct device *dev)
{
struct mtk_i2c *i2c = dev_get_drvdata(dev);
@@ -1544,11 +1541,10 @@ static int mtk_i2c_resume_noirq(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops mtk_i2c_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
- mtk_i2c_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
+ mtk_i2c_resume_noirq)
};
static struct platform_driver mtk_i2c_driver = {
@@ -1556,7 +1552,7 @@ static struct platform_driver mtk_i2c_driver = {
.remove_new = mtk_i2c_remove,
.driver = {
.name = I2C_DRV_NAME,
- .pm = &mtk_i2c_pm,
+ .pm = pm_sleep_ptr(&mtk_i2c_pm),
.of_match_table = mtk_i2c_of_match,
},
};
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 104bb194e990..81d46169bc1f 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -16,7 +16,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#define REG_SM0CFG2_REG 0x28
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 1d76f1c4dc06..36def0a9c95c 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 777278386f58..38d203d93eee 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -327,8 +327,8 @@ static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
/* Older incarnations of the device used non-standard BARs */
u16 iobase;
- if (pci_read_config_word(dev, alt_reg, &iobase)
- != PCIBIOS_SUCCESSFUL) {
+ error = pci_read_config_word(dev, alt_reg, &iobase);
+ if (error != PCIBIOS_SUCCESSFUL) {
dev_err(&dev->dev, "Error reading PCI config for %s\n",
name);
return -EIO;
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 212f412f1c74..b10574d42b7a 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -873,7 +873,6 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
static int nmk_i2c_suspend_late(struct device *dev)
{
int ret;
@@ -890,9 +889,7 @@ static int nmk_i2c_resume_early(struct device *dev)
{
return pm_runtime_force_resume(dev);
}
-#endif
-#ifdef CONFIG_PM
static int nmk_i2c_runtime_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
@@ -925,13 +922,10 @@ static int nmk_i2c_runtime_resume(struct device *dev)
return ret;
}
-#endif
static const struct dev_pm_ops nmk_i2c_pm = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(nmk_i2c_suspend_late, nmk_i2c_resume_early)
- SET_RUNTIME_PM_OPS(nmk_i2c_runtime_suspend,
- nmk_i2c_runtime_resume,
- NULL)
+ LATE_SYSTEM_SLEEP_PM_OPS(nmk_i2c_suspend_late, nmk_i2c_resume_early)
+ RUNTIME_PM_OPS(nmk_i2c_runtime_suspend, nmk_i2c_runtime_resume, NULL)
};
static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
@@ -1078,7 +1072,7 @@ static struct amba_driver nmk_i2c_driver = {
.drv = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
- .pm = &nmk_i2c_pm,
+ .pm = pm_ptr(&nmk_i2c_pm),
},
.id_table = nmk_i2c_ids,
.probe = nmk_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 53b65ffb6a64..ae4bae63ad4f 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -17,7 +17,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -695,6 +694,7 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
{
struct i2c_msg *msgs;
int msgs_num;
+ bool do_complete = false;
msgs = bus->msgs;
msgs_num = bus->msgs_num;
@@ -723,23 +723,17 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
msgs[1].flags & I2C_M_RD)
msgs[1].len = info;
}
- if (completion_done(&bus->cmd_complete) == false)
- complete(&bus->cmd_complete);
- break;
-
+ do_complete = true;
+ break;
case I2C_NACK_IND:
/* MASTER transmit got a NACK before tx all bytes */
bus->cmd_err = -ENXIO;
- if (bus->master_or_slave == I2C_MASTER)
- complete(&bus->cmd_complete);
-
+ do_complete = true;
break;
case I2C_BUS_ERR_IND:
/* Bus error */
bus->cmd_err = -EAGAIN;
- if (bus->master_or_slave == I2C_MASTER)
- complete(&bus->cmd_complete);
-
+ do_complete = true;
break;
case I2C_WAKE_UP_IND:
/* I2C wake up */
@@ -753,6 +747,8 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
if (bus->slave)
bus->master_or_slave = I2C_SLAVE;
#endif
+ if (do_complete)
+ complete(&bus->cmd_complete);
}
static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus)
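
The npcm7xx rework above replaces three separate complete() calls with a do_complete flag that is evaluated once at the end of the callback, so the completion is signalled from exactly one place. Stripped down to that control-flow change, the pattern looks roughly like this (types and names here are stand-ins, not the real struct npcm_i2c machinery):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Stand-in types used only for illustration. */
enum foo_i2c_event { FOO_DONE, FOO_NACK, FOO_BUS_ERR, FOO_OTHER };

struct foo_i2c {
	struct completion cmd_complete;
	int cmd_err;
};

static void foo_i2c_callback(struct foo_i2c *bus, enum foo_i2c_event ev)
{
	bool do_complete = false;

	switch (ev) {
	case FOO_DONE:
		do_complete = true;
		break;
	case FOO_NACK:
		bus->cmd_err = -ENXIO;
		do_complete = true;
		break;
	case FOO_BUS_ERR:
		bus->cmd_err = -EAGAIN;
		do_complete = true;
		break;
	default:
		break;
	}

	/* single completion point instead of one complete() per case */
	if (do_complete)
		complete(&bus->cmd_complete);
}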
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 4ac77e57bbbf..041a76f71a49 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -743,7 +743,6 @@ static void ocores_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
}
-#ifdef CONFIG_PM_SLEEP
static int ocores_i2c_suspend(struct device *dev)
{
struct ocores_i2c *i2c = dev_get_drvdata(dev);
@@ -772,11 +771,8 @@ static int ocores_i2c_resume(struct device *dev)
return ocores_init(dev, i2c);
}
-static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
-#define OCORES_I2C_PM (&ocores_i2c_pm)
-#else
-#define OCORES_I2C_PM NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(ocores_i2c_pm,
+ ocores_i2c_suspend, ocores_i2c_resume);
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
@@ -784,7 +780,7 @@ static struct platform_driver ocores_i2c_driver = {
.driver = {
.name = "ocores-i2c",
.of_match_table = ocores_i2c_match,
- .pm = OCORES_I2C_PM,
+ .pm = pm_sleep_ptr(&ocores_i2c_pm),
},
};
diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c
index 5f0ef8c35141..777f1a0278c7 100644
--- a/drivers/i2c/busses/i2c-owl.c
+++ b/drivers/i2c/busses/i2c-owl.c
@@ -16,7 +16,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
/* I2C registers */
#define OWL_I2C_REG_CTL 0x0000
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index d2a9e7b61c1a..b8d5480c54f6 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -22,7 +22,6 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <asm/irq.h>
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 82400057f810..a12525b3186b 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -613,7 +613,6 @@ static const struct i2c_algorithm pnx_algorithm = {
.functionality = i2c_pnx_func,
};
-#ifdef CONFIG_PM_SLEEP
static int i2c_pnx_controller_suspend(struct device *dev)
{
struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
@@ -630,12 +629,9 @@ static int i2c_pnx_controller_resume(struct device *dev)
return clk_prepare_enable(alg_data->clk);
}
-static SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
- i2c_pnx_controller_suspend, i2c_pnx_controller_resume);
-#define PNX_I2C_PM (&i2c_pnx_pm)
-#else
-#define PNX_I2C_PM NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
+ i2c_pnx_controller_suspend,
+ i2c_pnx_controller_resume);
static int i2c_pnx_probe(struct platform_device *pdev)
{
@@ -683,8 +679,7 @@ static int i2c_pnx_probe(struct platform_device *pdev)
"%s", pdev->name);
/* Register I/O resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- alg_data->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ alg_data->ioaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(alg_data->ioaddr))
return PTR_ERR(alg_data->ioaddr);
@@ -763,7 +758,7 @@ static struct platform_driver i2c_pnx_driver = {
.driver = {
.name = "pnx-i2c",
.of_match_table = of_match_ptr(i2c_pnx_of_match),
- .pm = PNX_I2C_PM,
+ .pm = pm_sleep_ptr(&i2c_pnx_pm),
},
.probe = i2c_pnx_probe,
.remove_new = i2c_pnx_remove,
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 30e38bc8b6db..08b3229c443d 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#define CE4100_PCI_I2C_DEVS 3
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 937f7eebe906..29be05af826b 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1362,7 +1362,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
struct i2c_pxa_platform_data *plat = dev_get_platdata(&dev->dev);
enum pxa_i2c_types i2c_type;
struct pxa_i2c *i2c;
- struct resource *res = NULL;
+ struct resource *res;
int ret, irq;
i2c = devm_kzalloc(&dev->dev, sizeof(struct pxa_i2c), GFP_KERNEL);
@@ -1379,8 +1379,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.dev.of_node = dev->dev.of_node;
#endif
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- i2c->reg_base = devm_ioremap_resource(&dev->dev, res);
+ i2c->reg_base = devm_platform_get_and_ioremap_resource(dev, 0, &res);
if (IS_ERR(i2c->reg_base))
return PTR_ERR(i2c->reg_base);
@@ -1404,10 +1403,9 @@ static int i2c_pxa_probe(struct platform_device *dev)
strscpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
i2c->clk = devm_clk_get(&dev->dev, NULL);
- if (IS_ERR(i2c->clk)) {
- dev_err(&dev->dev, "failed to get the clk: %ld\n", PTR_ERR(i2c->clk));
- return PTR_ERR(i2c->clk);
- }
+ if (IS_ERR(i2c->clk))
+ return dev_err_probe(&dev->dev, PTR_ERR(i2c->clk),
+ "failed to get the clk\n");
i2c->reg_ibmr = i2c->reg_base + pxa_reg_layout[i2c_type].ibmr;
i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr;
@@ -1491,7 +1489,6 @@ static void i2c_pxa_remove(struct platform_device *dev)
clk_disable_unprepare(i2c->clk);
}
-#ifdef CONFIG_PM
static int i2c_pxa_suspend_noirq(struct device *dev)
{
struct pxa_i2c *i2c = dev_get_drvdata(dev);
@@ -1516,17 +1513,12 @@ static const struct dev_pm_ops i2c_pxa_dev_pm_ops = {
.resume_noirq = i2c_pxa_resume_noirq,
};
-#define I2C_PXA_DEV_PM_OPS (&i2c_pxa_dev_pm_ops)
-#else
-#define I2C_PXA_DEV_PM_OPS NULL
-#endif
-
static struct platform_driver i2c_pxa_driver = {
.probe = i2c_pxa_probe,
.remove_new = i2c_pxa_remove,
.driver = {
.name = "pxa2xx-i2c",
- .pm = I2C_PXA_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&i2c_pxa_dev_pm_ops),
.of_match_table = i2c_pxa_dt_ids,
},
.id_table = i2c_pxa_id_table,
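
The i2c-pxa hunks bundle two probe() simplifications that repeat across this series: devm_platform_get_and_ioremap_resource() replaces the platform_get_resource() + devm_ioremap_resource() pair while still handing back the struct resource for callers that need it, and dev_err_probe() merges the error message and the return statement (and stays quiet for -EPROBE_DEFER). A condensed sketch under those assumptions, with placeholder names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *clk;

	/* one call: fetch the MEM resource and ioremap it */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		/* logs the failure (except -EPROBE_DEFER) and returns the errno */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get the clk\n");

	/* ... rest of probe ... */
	return 0;
}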
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index 622dc14add9d..414882c57d7f 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -588,10 +588,10 @@ static int cci_probe(struct platform_device *pdev)
/* Clocks */
ret = devm_clk_bulk_get_all(dev, &cci->clocks);
- if (ret < 1) {
- dev_err(dev, "failed to get clocks %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
+ else if (!ret)
+ return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n");
cci->nclocks = ret;
/* Retrieve CCI clock rate */
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index b670a67c4fdd..229353e96e09 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -767,7 +767,6 @@ err_tx:
static int geni_i2c_probe(struct platform_device *pdev)
{
struct geni_i2c_dev *gi2c;
- struct resource *res;
u32 proto, tx_depth, fifo_disable;
int ret;
struct device *dev = &pdev->dev;
@@ -779,8 +778,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
gi2c->se.dev = dev;
gi2c->se.wrapper = dev_get_drvdata(dev->parent);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gi2c->se.base = devm_ioremap_resource(dev, res);
+ gi2c->se.base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gi2c->se.base))
return PTR_ERR(gi2c->se.base);
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index ae90170023b0..598102d16677 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1927,7 +1927,6 @@ static void qup_i2c_remove(struct platform_device *pdev)
pm_runtime_set_suspended(qup->dev);
}
-#ifdef CONFIG_PM
static int qup_i2c_pm_suspend_runtime(struct device *device)
{
struct qup_i2c_dev *qup = dev_get_drvdata(device);
@@ -1945,9 +1944,7 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
qup_i2c_enable_clocks(qup);
return 0;
}
-#endif
-#ifdef CONFIG_PM_SLEEP
static int qup_i2c_suspend(struct device *device)
{
if (!pm_runtime_suspended(device))
@@ -1962,16 +1959,11 @@ static int qup_i2c_resume(struct device *device)
pm_request_autosuspend(device);
return 0;
}
-#endif
static const struct dev_pm_ops qup_i2c_qup_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(
- qup_i2c_suspend,
- qup_i2c_resume)
- SET_RUNTIME_PM_OPS(
- qup_i2c_pm_suspend_runtime,
- qup_i2c_pm_resume_runtime,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(qup_i2c_suspend, qup_i2c_resume)
+ RUNTIME_PM_OPS(qup_i2c_pm_suspend_runtime,
+ qup_i2c_pm_resume_runtime, NULL)
};
static const struct of_device_id qup_i2c_dt_match[] = {
@@ -1987,7 +1979,7 @@ static struct platform_driver qup_i2c_driver = {
.remove_new = qup_i2c_remove,
.driver = {
.name = "i2c_qup",
- .pm = &qup_i2c_qup_pm_ops,
+ .pm = pm_ptr(&qup_i2c_qup_pm_ops),
.of_match_table = qup_i2c_dt_match,
.acpi_match_table = ACPI_PTR(qup_i2c_acpi_match),
},
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 2d9c37410ebd..a32a93f9a60d 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -24,7 +24,7 @@
#include <linux/i2c-smbus.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -1169,7 +1169,6 @@ static void rcar_i2c_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
}
-#ifdef CONFIG_PM_SLEEP
static int rcar_i2c_suspend(struct device *dev)
{
struct rcar_i2c_priv *priv = dev_get_drvdata(dev);
@@ -1187,19 +1186,14 @@ static int rcar_i2c_resume(struct device *dev)
}
static const struct dev_pm_ops rcar_i2c_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rcar_i2c_suspend, rcar_i2c_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rcar_i2c_suspend, rcar_i2c_resume)
};
-#define DEV_PM_OPS (&rcar_i2c_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
-
static struct platform_driver rcar_i2c_driver = {
.driver = {
.name = "i2c-rcar",
.of_match_table = rcar_i2c_dt_ids,
- .pm = DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&rcar_i2c_pm_ops),
},
.probe = rcar_i2c_probe,
.remove_new = rcar_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 5f8c0bd508d2..f0ee8871d5ae 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -42,7 +42,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 28f0e5c64f32..127eb3805fac 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
@@ -1034,9 +1033,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
/* map the registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c->regs = devm_ioremap_resource(&pdev->dev, res);
-
+ i2c->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(i2c->regs))
return PTR_ERR(i2c->regs);
@@ -1076,7 +1073,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (!(i2c->quirks & QUIRK_POLL)) {
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot find IRQ\n");
clk_unprepare(i2c->clk);
return ret;
}
@@ -1125,7 +1121,6 @@ static void s3c24xx_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
}
-#ifdef CONFIG_PM_SLEEP
static int s3c24xx_i2c_suspend_noirq(struct device *dev)
{
struct s3c24xx_i2c *i2c = dev_get_drvdata(dev);
@@ -1155,26 +1150,19 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
return 0;
}
-#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(s3c24xx_i2c_suspend_noirq,
- s3c24xx_i2c_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(s3c24xx_i2c_suspend_noirq,
+ s3c24xx_i2c_resume_noirq)
};
-#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
-#else
-#define S3C24XX_DEV_PM_OPS NULL
-#endif
-
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove_new = s3c24xx_i2c_remove,
.id_table = s3c24xx_driver_ids,
.driver = {
.name = "s3c-i2c",
- .pm = S3C24XX_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&s3c24xx_i2c_dev_pm_ops),
.of_match_table = of_match_ptr(s3c24xx_i2c_match),
},
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 21717b943a9e..5adbe62cf621 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -20,7 +20,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -871,7 +871,6 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
{
struct sh_mobile_i2c_data *pd;
struct i2c_adapter *adap;
- struct resource *res;
const struct sh_mobile_dt_config *config;
int ret;
u32 bus_speed;
@@ -893,10 +892,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
pd->dev = &dev->dev;
platform_set_drvdata(dev, pd);
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-
- pd->res = res;
- pd->reg = devm_ioremap_resource(&dev->dev, res);
+ pd->reg = devm_platform_get_and_ioremap_resource(dev, 0, &pd->res);
if (IS_ERR(pd->reg))
return PTR_ERR(pd->reg);
@@ -905,7 +901,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
pd->clks_per_count = 1;
/* Newer variants come with two new bits in ICIC */
- if (resource_size(res) > 0x17)
+ if (resource_size(pd->res) > 0x17)
pd->flags |= IIC_FLAG_HAS_ICIC67;
pm_runtime_enable(&dev->dev);
@@ -965,7 +961,6 @@ static void sh_mobile_i2c_remove(struct platform_device *dev)
pm_runtime_disable(&dev->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int sh_mobile_i2c_suspend(struct device *dev)
{
struct sh_mobile_i2c_data *pd = dev_get_drvdata(dev);
@@ -983,20 +978,15 @@ static int sh_mobile_i2c_resume(struct device *dev)
}
static const struct dev_pm_ops sh_mobile_i2c_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sh_mobile_i2c_suspend,
- sh_mobile_i2c_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(sh_mobile_i2c_suspend,
+ sh_mobile_i2c_resume)
};
-#define DEV_PM_OPS (&sh_mobile_i2c_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
-
static struct platform_driver sh_mobile_i2c_driver = {
.driver = {
.name = "i2c-sh_mobile",
.of_match_table = sh_mobile_i2c_dt_ids,
- .pm = DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&sh_mobile_i2c_pm_ops),
},
.probe = sh_mobile_i2c_probe,
.remove_new = sh_mobile_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index c793a5c14cda..486f1e9dfb74 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -175,11 +175,11 @@ static int sis5595_setup(struct pci_dev *SIS5595_dev)
if (force_addr) {
dev_info(&SIS5595_dev->dev, "forcing ISA address 0x%04X\n", sis5595_base);
- if (pci_write_config_word(SIS5595_dev, ACPI_BASE, sis5595_base)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_write_config_word(SIS5595_dev, ACPI_BASE, sis5595_base);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
- if (pci_read_config_word(SIS5595_dev, ACPI_BASE, &a)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_read_config_word(SIS5595_dev, ACPI_BASE, &a);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
if ((a & ~(SIS5595_EXTENT - 1)) != sis5595_base) {
/* doesn't work for some chips! */
@@ -188,16 +188,16 @@ static int sis5595_setup(struct pci_dev *SIS5595_dev)
}
}
- if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
if ((val & 0x80) == 0) {
dev_info(&SIS5595_dev->dev, "enabling ACPI\n");
- if (pci_write_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, val | 0x80)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_write_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, val | 0x80);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
- if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
if ((val & 0x80) == 0) {
/* doesn't work for some chips? */
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index ffc54fbf814d..c52d1bec60b4 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 25c3521cae0e..ce2333408904 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -812,8 +812,7 @@ static int st_i2c_probe(struct platform_device *pdev)
if (!i2c_dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c_dev->base = devm_ioremap_resource(&pdev->dev, res);
+ i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(i2c_dev->base))
return PTR_ERR(i2c_dev->base);
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 6ad06a5a22b4..ecc54792a66f 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -767,8 +767,7 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
if (!i2c_dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c_dev->base = devm_ioremap_resource(&pdev->dev, res);
+ i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(i2c_dev->base))
return PTR_ERR(i2c_dev->base);
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index e897d9101434..579b30581725 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -2121,12 +2121,12 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
phy_addr = (dma_addr_t)res->start;
irq_event = platform_get_irq(pdev, 0);
- if (irq_event <= 0)
- return irq_event ? : -ENOENT;
+ if (irq_event < 0)
+ return irq_event;
irq_error = platform_get_irq(pdev, 1);
- if (irq_error <= 0)
- return irq_error ? : -ENOENT;
+ if (irq_error < 0)
+ return irq_error;
i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
"wakeup-source");
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 4cc196ca8f6d..bbea521b05dd 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -557,20 +557,16 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
ret = clk_prepare_enable(i2c->pclk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable clock (%d)\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to enable clock\n");
i2c->pclkrate = clk_get_rate(i2c->pclk);
}
if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
- i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE) {
- dev_err(&pdev->dev, "PCLK missing or out of range (%d)\n",
- i2c->pclkrate);
- return -EINVAL;
- }
+ i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "PCLK missing or out of range (%d)\n",
+ i2c->pclkrate);
i2c->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(i2c->base))
@@ -582,10 +578,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, i2c->irq, synquacer_i2c_isr,
0, dev_name(&pdev->dev), i2c);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "cannot claim IRQ %d\n", i2c->irq);
i2c->state = STATE_IDLE;
i2c->dev = &pdev->dev;
@@ -605,10 +599,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
synquacer_i2c_hw_init(i2c);
ret = i2c_add_numbered_adapter(&i2c->adapter);
- if (ret) {
- dev_err(&pdev->dev, "failed to add bus to i2c core\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to add bus to i2c core\n");
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index bc3f94561746..b0840fa0f53e 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 03fc10b45bd6..920d5a8cbf4c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -22,7 +22,7 @@
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index d1fa9ff5aeab..1bffe36c40ad 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -222,6 +222,10 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
int retval = -ENOMEM;
u16 version;
+ if (interface->intf_assoc &&
+ interface->intf_assoc->bFunctionClass != USB_CLASS_VENDOR_SPEC)
+ return -ENODEV;
+
dev_dbg(&interface->dev, "probing usb device\n");
/* allocate memory for our device state and initialize it */
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 4b9536f50800..c60ae531ba57 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -243,7 +243,6 @@ static struct virtio_device_id id_table[] = {
};
MODULE_DEVICE_TABLE(virtio, id_table);
-#ifdef CONFIG_PM_SLEEP
static int virtio_i2c_freeze(struct virtio_device *vdev)
{
virtio_i2c_del_vqs(vdev);
@@ -254,7 +253,6 @@ static int virtio_i2c_restore(struct virtio_device *vdev)
{
return virtio_i2c_setup_vqs(vdev->priv);
}
-#endif
static const unsigned int features[] = {
VIRTIO_I2C_F_ZERO_LENGTH_REQUEST,
@@ -269,10 +267,8 @@ static struct virtio_driver virtio_i2c_driver = {
.driver = {
.name = "i2c_virtio",
},
-#ifdef CONFIG_PM_SLEEP
- .freeze = virtio_i2c_freeze,
- .restore = virtio_i2c_restore,
-#endif
+ .freeze = pm_sleep_ptr(virtio_i2c_freeze),
+ .restore = pm_sleep_ptr(virtio_i2c_restore),
};
module_virtio_driver(virtio_i2c_driver);
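
In i2c-virtio the same pm_sleep_ptr() helper is applied to the .freeze/.restore members directly: when CONFIG_PM_SLEEP is disabled the pointers evaluate to NULL, yet the callbacks stay syntactically referenced, so the #ifdef around both the functions and the initializers can go. A bare-bones sketch (hypothetical foo driver, not from the diff):

#include <linux/pm.h>
#include <linux/virtio.h>

static int foo_virtio_freeze(struct virtio_device *vdev)
{
	/* tear down virtqueues before suspend */
	return 0;
}

static int foo_virtio_restore(struct virtio_device *vdev)
{
	/* rebuild virtqueues on resume */
	return 0;
}

static struct virtio_driver foo_virtio_driver = {
	.driver.name = "foo_virtio",
	/* NULL callbacks when CONFIG_PM_SLEEP=n, no #ifdef block required */
	.freeze = pm_sleep_ptr(foo_virtio_freeze),
	.restore = pm_sleep_ptr(foo_virtio_restore),
};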
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index b3bb97762c85..71391b590ada 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -710,7 +710,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
* reset the IP instead of just flush fifos
*/
ret = xiic_reinit(i2c);
- if (!ret)
+ if (ret < 0)
dev_dbg(i2c->adap.dev.parent, "reinit failed\n");
if (i2c->rx_msg) {
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index f59e8c544f36..08a59a920929 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -529,10 +529,8 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, priv->irq, xlp9xx_i2c_isr, 0,
pdev->name, priv);
- if (err) {
- dev_err(&pdev->dev, "IRQ request failed!\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "IRQ request failed!\n");
init_completion(&priv->msg_complete);
priv->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 313904be5f3b..57ff09f18c37 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -341,7 +341,7 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
priv->adap.lock_ops = &i2c_parent_lock_ops;
/* Sanity check on class */
- if (i2c_mux_parent_classes(parent) & class)
+ if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED)
dev_err(&parent->dev,
"Segment %d behind mux can't share classes with ancestors\n",
chan_id);
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index ea838dbae32e..db1b9057612a 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -65,11 +65,11 @@ config I2C_MUX_PCA9541
will be called i2c-mux-pca9541.
config I2C_MUX_PCA954x
- tristate "NXP PCA954x and PCA984x I2C Mux/switches"
+ tristate "NXP PCA954x/PCA984x and Maxim MAX735x/MAX736x I2C Mux/switches"
depends on GPIOLIB || COMPILE_TEST
help
- If you say yes here you get support for the NXP PCA954x
- and PCA984x I2C mux/switch devices.
+ If you say yes here you get support for NXP PCA954x/PCA984x
+ and Maxim MAX735x/MAX736x I2C mux/switch devices.
This driver can also be built as a module. If so, the module
will be called i2c-mux-pca954x.
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index a3a122fae71e..22f2280eab7f 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -243,6 +243,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+ if (!props[i].name || !props[i].value) {
+ err = -ENOMEM;
+ goto err_rollback;
+ }
props[i].length = 3;
of_changeset_init(&priv->chan[i].chgset);
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 5d5cbe0130cd..5ca03bd34c8d 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -105,8 +105,10 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
} else if (is_acpi_node(child)) {
rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i);
- if (rc)
+ if (rc) {
+ fwnode_handle_put(child);
return dev_err_probe(dev, rc, "Cannot get address\n");
+ }
}
i++;
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index 0405af0e1510..baccf4bfaf02 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -11,7 +11,7 @@
#include <linux/i2c-mux.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
struct mux {
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 5a03031519be..23766d853e76 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -15,7 +15,6 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -62,7 +61,7 @@ static const struct chip_desc chips[] = {
static bool ltc4306_is_volatile_reg(struct device *dev, unsigned int reg)
{
- return (reg == LTC_REG_CONFIG) ? true : false;
+ return reg == LTC_REG_CONFIG;
}
static const struct regmap_config ltc4306_regmap_config = {
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 0ccee2ae5720..2219062104fb 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -11,6 +11,12 @@
* PCA9540, PCA9542, PCA9543, PCA9544, PCA9545, PCA9546, PCA9547,
* PCA9548, PCA9846, PCA9847, PCA9848 and PCA9849.
*
+ * It's also compatible with Maxim's MAX735x I2C switch chips, which are
+ * controlled like the NXP PCA9548, and with the MAX736x chips, which behave
+ * like the PCA9544.
+ *
+ * This covers the MAX7356, MAX7357, MAX7358, MAX7367, MAX7368 and MAX7369.
+ *
* These chips are all controlled via the I2C bus itself, and all have a
* single 8-bit register. The upstream "parent" bus fans out to two,
* four, or eight downstream busses or channels; which of these
@@ -42,6 +48,7 @@
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/property.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <dt-bindings/mux/mux.h>
@@ -51,6 +58,12 @@
#define PCA954X_IRQ_OFFSET 4
enum pca_type {
+ max_7356,
+ max_7357,
+ max_7358,
+ max_7367,
+ max_7368,
+ max_7369,
pca_9540,
pca_9542,
pca_9543,
@@ -88,10 +101,52 @@ struct pca954x {
struct irq_domain *irq;
unsigned int irq_mask;
raw_spinlock_t lock;
+ struct regulator *supply;
};
-/* Provide specs for the PCA954x types we know about */
+/* Provide specs for the MAX735x, PCA954x and PCA984x types we know about */
static const struct chip_desc chips[] = {
+ [max_7356] = {
+ .nchans = 8,
+ .muxtype = pca954x_isswi,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ },
+ [max_7357] = {
+ .nchans = 8,
+ .muxtype = pca954x_isswi,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ /*
+ * No interrupt controller support. The interrupt
+ * provides information about stuck channels.
+ */
+ },
+ [max_7358] = {
+ .nchans = 8,
+ .muxtype = pca954x_isswi,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ /*
+ * No interrupt controller support. The interrupt
+ * provides information about stuck channels.
+ */
+ },
+ [max_7367] = {
+ .nchans = 4,
+ .muxtype = pca954x_isswi,
+ .has_irq = 1,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ },
+ [max_7368] = {
+ .nchans = 4,
+ .muxtype = pca954x_isswi,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ },
+ [max_7369] = {
+ .nchans = 4,
+ .enable = 0x4,
+ .muxtype = pca954x_ismux,
+ .has_irq = 1,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ },
[pca_9540] = {
.nchans = 2,
.enable = 0x4,
@@ -177,6 +232,12 @@ static const struct chip_desc chips[] = {
};
static const struct i2c_device_id pca954x_id[] = {
+ { "max7356", max_7356 },
+ { "max7357", max_7357 },
+ { "max7358", max_7358 },
+ { "max7367", max_7367 },
+ { "max7368", max_7368 },
+ { "max7369", max_7369 },
{ "pca9540", pca_9540 },
{ "pca9542", pca_9542 },
{ "pca9543", pca_9543 },
@@ -194,6 +255,12 @@ static const struct i2c_device_id pca954x_id[] = {
MODULE_DEVICE_TABLE(i2c, pca954x_id);
static const struct of_device_id pca954x_of_match[] = {
+ { .compatible = "maxim,max7356", .data = &chips[max_7356] },
+ { .compatible = "maxim,max7357", .data = &chips[max_7357] },
+ { .compatible = "maxim,max7358", .data = &chips[max_7358] },
+ { .compatible = "maxim,max7367", .data = &chips[max_7367] },
+ { .compatible = "maxim,max7368", .data = &chips[max_7368] },
+ { .compatible = "maxim,max7369", .data = &chips[max_7369] },
{ .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
{ .compatible = "nxp,pca9542", .data = &chips[pca_9542] },
{ .compatible = "nxp,pca9543", .data = &chips[pca_9543] },
@@ -382,6 +449,8 @@ static void pca954x_cleanup(struct i2c_mux_core *muxc)
struct pca954x *data = i2c_mux_priv(muxc);
int c, irq;
+ regulator_disable(data->supply);
+
if (data->irq) {
for (c = 0; c < data->chip->nchans; c++) {
irq = irq_find_mapping(data->irq, c);
@@ -434,10 +503,22 @@ static int pca954x_probe(struct i2c_client *client)
i2c_set_clientdata(client, muxc);
data->client = client;
+ data->supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(data->supply))
+ return dev_err_probe(dev, PTR_ERR(data->supply),
+ "Failed to request regulator\n");
+
+ ret = regulator_enable(data->supply);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable vdd supply\n");
+
/* Reset the mux if a reset GPIO is specified. */
gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
+ if (IS_ERR(gpio)) {
+ ret = PTR_ERR(gpio);
+ goto fail_cleanup;
+ }
if (gpio) {
udelay(1);
gpiod_set_value_cansleep(gpio, 0);
@@ -454,7 +535,7 @@ static int pca954x_probe(struct i2c_client *client)
ret = i2c_get_device_id(client, &id);
if (ret && ret != -EOPNOTSUPP)
- return ret;
+ goto fail_cleanup;
if (!ret &&
(id.manufacturer_id != data->chip->id.manufacturer_id ||
@@ -462,7 +543,8 @@ static int pca954x_probe(struct i2c_client *client)
dev_warn(dev, "unexpected device id %03x-%03x-%x\n",
id.manufacturer_id, id.part_id,
id.die_revision);
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail_cleanup;
}
}
@@ -481,7 +563,8 @@ static int pca954x_probe(struct i2c_client *client)
ret = pca954x_init(client, data);
if (ret < 0) {
dev_warn(dev, "probe failed\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail_cleanup;
}
ret = pca954x_irq_setup(muxc);
@@ -530,7 +613,6 @@ static void pca954x_remove(struct i2c_client *client)
pca954x_cleanup(muxc);
}
-#ifdef CONFIG_PM_SLEEP
static int pca954x_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -544,14 +626,13 @@ static int pca954x_resume(struct device *dev)
return ret;
}
-#endif
-static SIMPLE_DEV_PM_OPS(pca954x_pm, NULL, pca954x_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pca954x_pm, NULL, pca954x_resume);
static struct i2c_driver pca954x_driver = {
.driver = {
.name = "pca954x",
- .pm = &pca954x_pm,
+ .pm = pm_sleep_ptr(&pca954x_pm),
.of_match_table = pca954x_of_match,
},
.probe = pca954x_probe,
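
Beyond adding the Maxim chip descriptions, the pca954x probe changes above are mostly about ordering: once regulator_enable() has succeeded, every later failure is routed through the fail_cleanup label (which ends in pca954x_cleanup(), where regulator_disable() now lives) instead of returning directly. The error-unwind shape, reduced to a sketch with hypothetical helpers:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>

/* Hypothetical device-setup helper standing in for the mux init/irq code. */
static int foo_hw_init(struct i2c_client *client)
{
	return 0;
}

static int foo_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct regulator *supply;
	int ret;

	supply = devm_regulator_get(dev, "vdd");
	if (IS_ERR(supply))
		return dev_err_probe(dev, PTR_ERR(supply),
				     "Failed to request regulator\n");

	ret = regulator_enable(supply);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to enable vdd supply\n");

	ret = foo_hw_init(client);
	if (ret)
		goto fail_cleanup;	/* no direct returns from here on */

	return 0;

fail_cleanup:
	/* everything that fails after regulator_enable() must undo it */
	regulator_disable(supply);
	return ret;
}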
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 08aeb69a7800..87283e4a4607 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1308,7 +1308,11 @@ static int i3c_master_get_i3c_addrs(struct i3c_dev_desc *dev)
if (dev->info.static_addr) {
status = i3c_bus_get_addr_slot_status(&master->bus,
dev->info.static_addr);
- if (status != I3C_ADDR_SLOT_FREE)
+ /* Since static address and assigned dynamic address can be
+ * equal, allow this case to pass.
+ */
+ if (status != I3C_ADDR_SLOT_FREE &&
+ dev->info.static_addr != dev->boardinfo->init_dyn_addr)
return -EBUSY;
i3c_bus_set_addr_slot_status(&master->bus,
diff --git a/drivers/i3c/master/ast2600-i3c-master.c b/drivers/i3c/master/ast2600-i3c-master.c
index 09ed19d489e9..01a47d3dd499 100644
--- a/drivers/i3c/master/ast2600-i3c-master.c
+++ b/drivers/i3c/master/ast2600-i3c-master.c
@@ -8,7 +8,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 01610fa5b0cc..49551db71bc9 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <linux/of_device.h>
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index d97c3175e0e2..6a781f89b0e4 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -339,7 +339,7 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
break;
}
if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
- RESP_STATUS(xfer[0].response) == 1) {
+ RESP_DATA_LENGTH(xfer->response) == 1) {
ret = 0; /* no more devices to be assigned */
break;
}
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 0d63b732ef0c..8f8295acdadb 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -156,6 +156,7 @@ struct svc_i3c_regs_save {
* @base: I3C master controller
* @dev: Corresponding device
* @regs: Memory mapping
+ * @saved_regs: Volatile values for PM operations
* @free_slots: Bit array of available slots
* @addrs: Array containing the dynamic addresses of each attached device
* @descs: Array of descriptors, one per attached device
@@ -789,6 +790,10 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
*/
break;
} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
+ /* No I3C devices attached */
+ if (dev_nb == 0)
+ break;
+
/*
* A slave device nacked the address, this is
* allowed only once, DAA will be stopped and
@@ -1263,11 +1268,17 @@ static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
bool broadcast = cmd->id < 0x80;
+ int ret;
if (broadcast)
- return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
+ ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
else
- return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
+ ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
+
+ if (ret)
+ cmd->err = I3C_ERROR_M2;
+
+ return ret;
}
static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
@@ -1518,8 +1529,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
return PTR_ERR(master->sclk);
master->irq = platform_get_irq(pdev, 0);
- if (master->irq <= 0)
- return -ENOENT;
+ if (master->irq < 0)
+ return master->irq;
master->dev = dev;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 73f913cbd146..7acc0f936dad 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1457,6 +1457,17 @@ static int config_non_roce_gid_cache(struct ib_device *device,
i);
goto err;
}
+
+ if (rdma_protocol_iwarp(device, port)) {
+ struct net_device *ndev;
+
+ ndev = ib_device_get_netdev(device, port);
+ if (!ndev)
+ continue;
+ RCU_INIT_POINTER(gid_attr.ndev, ndev);
+ dev_put(ndev);
+ }
+
gid_attr.index = i;
tprops->subnet_prefix =
be64_to_cpu(gid_attr.gid.global.subnet_prefix);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9891c7dc2af5..1e2cd7c8716e 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -686,30 +686,52 @@ cma_validate_port(struct ib_device *device, u32 port,
struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
int bound_if_index = dev_addr->bound_dev_if;
- const struct ib_gid_attr *sgid_attr;
int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
- return ERR_PTR(-ENODEV);
+ goto out;
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
- return ERR_PTR(-ENODEV);
+ goto out;
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
- return ERR_PTR(-ENODEV);
+ goto out;
+
+ /*
+ * For drivers that do not associate more than one net device with
+ * their gid tables, such as iWARP drivers, it is sufficient to
+ * return the first table entry.
+ *
+ * Other driver classes might be included in the future.
+ */
+ if (rdma_protocol_iwarp(device, port)) {
+ sgid_attr = rdma_get_gid_attr(device, port, 0);
+ if (IS_ERR(sgid_attr))
+ goto out;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(sgid_attr->ndev);
+ if (!net_eq(dev_net(ndev), dev_addr->net) ||
+ ndev->ifindex != bound_if_index)
+ sgid_attr = ERR_PTR(-ENODEV);
+ rcu_read_unlock();
+ goto out;
+ }
if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
ndev = dev_get_by_index(dev_addr->net, bound_if_index);
if (!ndev)
- return ERR_PTR(-ENODEV);
+ goto out;
} else {
gid_type = IB_GID_TYPE_IB;
}
sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
dev_put(ndev);
+out:
return sgid_attr;
}
@@ -4946,7 +4968,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
int err = 0;
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL;
- struct ib_sa_multicast ib;
+ struct ib_sa_multicast ib = {};
enum ib_gid_type gid_type;
bool send_only;
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 7b68b3ea979f..f2fb2d8a6597 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
return -ENOMEM;
for (i = 0; i < ports_num; i++) {
- char port_str[10];
+ char port_str[11];
ports[i].port_num = i + 1;
snprintf(port_str, sizeof(port_str), "%u", i + 1);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 358a2db38d23..eecb369898f5 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -307,7 +307,7 @@ get_remote_info_exit:
struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
u8 nl_client, gfp_t gfp)
{
- struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ struct iwpm_nlmsg_request *nlmsg_request;
unsigned long flags;
nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 1b2cc9e45ade..ae2db0c70788 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -75,7 +75,7 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
if (type >= RDMA_NL_NUM_CLIENTS)
return false;
- return (op < max_num_ops[type]) ? true : false;
+ return op < max_num_ops[type];
}
static const struct rdma_nl_cbs *
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index d5d3e4f0de77..6d1dbc978759 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -2529,6 +2529,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
},
[RDMA_NLDEV_CMD_SYS_SET] = {
.doit = nldev_set_sys_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NLDEV_CMD_STAT_SET] = {
.doit = nldev_stat_set_doit,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 7c9c79c13941..495d5a5d0373 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -72,12 +72,23 @@ enum {
#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
static dev_t dynamic_uverbs_dev;
-static struct class *uverbs_class;
static DEFINE_IDA(uverbs_ida);
static int ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
+static char *uverbs_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0666;
+ return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
+static const struct class uverbs_class = {
+ .name = "infiniband_verbs",
+ .devnode = uverbs_devnode,
+};
+
/*
* Must be called with the ufile->device->disassociate_srcu held, and the lock
* must be held until use of the ucontext is finished.
@@ -535,7 +546,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
if (hdr->in_words * 4 != count)
return -EINVAL;
- if (count < method_elm->req_size + sizeof(hdr)) {
+ if (count < method_elm->req_size + sizeof(*hdr)) {
/*
* rdma-core v18 and v19 have a bug where they send DESTROY_CQ
* with a 16 byte write instead of 24. Old kernels didn't
@@ -1117,7 +1128,7 @@ static int ib_uverbs_add_one(struct ib_device *device)
}
device_initialize(&uverbs_dev->dev);
- uverbs_dev->dev.class = uverbs_class;
+ uverbs_dev->dev.class = &uverbs_class;
uverbs_dev->dev.parent = device->dev.parent;
uverbs_dev->dev.release = ib_uverbs_release_dev;
uverbs_dev->groups[0] = &dev_attr_group;
@@ -1235,13 +1246,6 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
put_device(&uverbs_dev->dev);
}
-static char *uverbs_devnode(const struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0666;
- return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
-}
-
static int __init ib_uverbs_init(void)
{
int ret;
@@ -1262,16 +1266,13 @@ static int __init ib_uverbs_init(void)
goto out_alloc;
}
- uverbs_class = class_create("infiniband_verbs");
- if (IS_ERR(uverbs_class)) {
- ret = PTR_ERR(uverbs_class);
+ ret = class_register(&uverbs_class);
+ if (ret) {
pr_err("user_verbs: couldn't create class infiniband_verbs\n");
goto out_chrdev;
}
- uverbs_class->devnode = uverbs_devnode;
-
- ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
+ ret = class_create_file(&uverbs_class, &class_attr_abi_version.attr);
if (ret) {
pr_err("user_verbs: couldn't create abi_version attribute\n");
goto out_class;
@@ -1286,7 +1287,7 @@ static int __init ib_uverbs_init(void)
return 0;
out_class:
- class_destroy(uverbs_class);
+ class_unregister(&uverbs_class);
out_chrdev:
unregister_chrdev_region(dynamic_uverbs_dev,
@@ -1303,7 +1304,7 @@ out:
static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
- class_destroy(uverbs_class);
+ class_unregister(&uverbs_class);
unregister_chrdev_region(IB_UVERBS_BASE_DEV,
IB_UVERBS_NUM_FIXED_MINOR);
unregister_chrdev_region(dynamic_uverbs_dev,
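
The uverbs_main.c change follows the driver-core move from a dynamically allocated class (class_create() plus patching ->devnode afterwards) to a statically defined, const struct class whose .devnode is set in the initializer and which is brought up with class_register()/class_unregister(). The bare pattern, using illustrative names:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

static char *foo_devnode(const struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
}

static const struct class foo_class = {
	.name = "foo_class",
	.devnode = foo_devnode,
};

static int __init foo_init(void)
{
	return class_register(&foo_class);
}

static void __exit foo_exit(void)
{
	class_unregister(&foo_class);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");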
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index 999da9c79866..381aa5797641 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -107,6 +107,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
return ret;
uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
+ if (IS_ERR(uattr))
+ return PTR_ERR(uattr);
read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
read_attr.counters_buff = uverbs_zalloc(
attrs, array_size(read_attr.ncounters, sizeof(u64)));
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index b99b3cc283b6..41ff5595c860 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1880,6 +1880,89 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
+static void ib_get_width_and_speed(u32 netdev_speed, u32 lanes,
+ u16 *speed, u8 *width)
+{
+ if (!lanes) {
+ if (netdev_speed <= SPEED_1000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_SDR;
+ } else if (netdev_speed <= SPEED_10000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_FDR10;
+ } else if (netdev_speed <= SPEED_20000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_DDR;
+ } else if (netdev_speed <= SPEED_25000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_40000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_FDR10;
+ } else if (netdev_speed <= SPEED_50000) {
+ *width = IB_WIDTH_2X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_100000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_200000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_HDR;
+ } else {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_NDR;
+ }
+
+ return;
+ }
+
+ switch (lanes) {
+ case 1:
+ *width = IB_WIDTH_1X;
+ break;
+ case 2:
+ *width = IB_WIDTH_2X;
+ break;
+ case 4:
+ *width = IB_WIDTH_4X;
+ break;
+ case 8:
+ *width = IB_WIDTH_8X;
+ break;
+ case 12:
+ *width = IB_WIDTH_12X;
+ break;
+ default:
+ *width = IB_WIDTH_1X;
+ }
+
+ switch (netdev_speed / lanes) {
+ case SPEED_2500:
+ *speed = IB_SPEED_SDR;
+ break;
+ case SPEED_5000:
+ *speed = IB_SPEED_DDR;
+ break;
+ case SPEED_10000:
+ *speed = IB_SPEED_FDR10;
+ break;
+ case SPEED_14000:
+ *speed = IB_SPEED_FDR;
+ break;
+ case SPEED_25000:
+ *speed = IB_SPEED_EDR;
+ break;
+ case SPEED_50000:
+ *speed = IB_SPEED_HDR;
+ break;
+ case SPEED_100000:
+ *speed = IB_SPEED_NDR;
+ break;
+ default:
+ *speed = IB_SPEED_SDR;
+ }
+}
+
int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
{
int rc;
@@ -1904,29 +1987,13 @@ int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
netdev_speed = lksettings.base.speed;
} else {
netdev_speed = SPEED_1000;
- pr_warn("%s speed is unknown, defaulting to %u\n", netdev->name,
- netdev_speed);
+ if (rc)
+ pr_warn("%s speed is unknown, defaulting to %u\n",
+ netdev->name, netdev_speed);
}
- if (netdev_speed <= SPEED_1000) {
- *width = IB_WIDTH_1X;
- *speed = IB_SPEED_SDR;
- } else if (netdev_speed <= SPEED_10000) {
- *width = IB_WIDTH_1X;
- *speed = IB_SPEED_FDR10;
- } else if (netdev_speed <= SPEED_20000) {
- *width = IB_WIDTH_4X;
- *speed = IB_SPEED_DDR;
- } else if (netdev_speed <= SPEED_25000) {
- *width = IB_WIDTH_1X;
- *speed = IB_SPEED_EDR;
- } else if (netdev_speed <= SPEED_40000) {
- *width = IB_WIDTH_4X;
- *speed = IB_SPEED_FDR10;
- } else {
- *width = IB_WIDTH_4X;
- *speed = IB_SPEED_EDR;
- }
+ ib_get_width_and_speed(netdev_speed, lksettings.lanes,
+ speed, width);
return 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ea81b2497511..9fd9849ebdd1 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -112,12 +112,34 @@ struct bnxt_re_gsi_context {
#define BNXT_RE_NQ_IDX 1
#define BNXT_RE_GEN_P5_MAX_VF 64
+struct bnxt_re_pacing {
+ u64 dbr_db_fifo_reg_off;
+ void *dbr_page;
+ u64 dbr_bar_addr;
+ u32 pacing_algo_th;
+ u32 do_pacing_save;
+ u32 dbq_pacing_time; /* ms */
+ u32 dbr_def_do_pacing;
+ bool dbr_pacing;
+ struct mutex dbq_lock; /* synchronize db pacing algo */
+};
+
+#define BNXT_RE_MAX_DBR_DO_PACING 0xFFFF
+#define BNXT_RE_DBR_PACING_TIME 5 /* ms */
+#define BNXT_RE_PACING_ALGO_THRESHOLD 250 /* Entries in DB FIFO */
+#define BNXT_RE_PACING_ALARM_TH_MULTIPLE 2 /* Multiple of pacing algo threshold */
+/* Default do_pacing value when there is no congestion */
+#define BNXT_RE_DBR_DO_PACING_NO_CONGESTION 0x7F /* 1 in 512 probability */
+#define BNXT_RE_DB_FIFO_ROOM_MASK 0x1FFF8000
+#define BNXT_RE_MAX_FIFO_DEPTH 0x2c00
+#define BNXT_RE_DB_FIFO_ROOM_SHIFT 15
+#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
+
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
-#define BNXT_RE_FLAG_GOT_MSIX 2
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
@@ -152,16 +174,9 @@ struct bnxt_re_dev {
struct bnxt_qplib_res qplib_res;
struct bnxt_qplib_dpi dpi_privileged;
- atomic_t qp_count;
struct mutex qp_lock; /* protect qp list */
struct list_head qp_list;
- atomic_t cq_count;
- atomic_t srq_count;
- atomic_t mr_count;
- atomic_t mw_count;
- atomic_t ah_count;
- atomic_t pd_count;
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
@@ -171,6 +186,9 @@ struct bnxt_re_dev {
atomic_t nq_alloc_cnt;
u32 is_virtfn;
u32 num_vfs;
+ struct bnxt_re_pacing pacing;
+ struct work_struct dbq_fifo_check_work;
+ struct delayed_work dbq_pacing_work;
};
#define to_bnxt_re_dev(ptr, member) \
@@ -181,6 +199,7 @@ struct bnxt_re_dev {
#define BNXT_RE_ROCEV2_IPV6_PACKET 3
#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT))
+void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev);
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
{
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 825d512799d9..93572405d6fa 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -61,15 +61,29 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_ACTIVE_PD].name = "active_pds",
[BNXT_RE_ACTIVE_AH].name = "active_ahs",
[BNXT_RE_ACTIVE_QP].name = "active_qps",
+ [BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps",
+ [BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps",
[BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
[BNXT_RE_ACTIVE_CQ].name = "active_cqs",
[BNXT_RE_ACTIVE_MR].name = "active_mrs",
[BNXT_RE_ACTIVE_MW].name = "active_mws",
+ [BNXT_RE_WATERMARK_PD].name = "watermark_pds",
+ [BNXT_RE_WATERMARK_AH].name = "watermark_ahs",
+ [BNXT_RE_WATERMARK_QP].name = "watermark_qps",
+ [BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps",
+ [BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps",
+ [BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs",
+ [BNXT_RE_WATERMARK_CQ].name = "watermark_cqs",
+ [BNXT_RE_WATERMARK_MR].name = "watermark_mrs",
+ [BNXT_RE_WATERMARK_MW].name = "watermark_mws",
+ [BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt",
[BNXT_RE_RX_PKTS].name = "rx_pkts",
[BNXT_RE_RX_BYTES].name = "rx_bytes",
[BNXT_RE_TX_PKTS].name = "tx_pkts",
[BNXT_RE_TX_BYTES].name = "tx_bytes",
[BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors",
+ [BNXT_RE_TX_ERRORS].name = "tx_roce_errors",
+ [BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
[BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
[BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
[BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
@@ -117,14 +131,25 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_TX_READ_RES].name = "tx_read_resp",
[BNXT_RE_TX_WRITE_REQ].name = "tx_write_req",
[BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
+ [BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
+ [BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
[BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
[BNXT_RE_RX_READ_REQ].name = "rx_read_req",
[BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
[BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
[BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
+ [BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
+ [BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
[BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
[BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
- [BNXT_RE_OOB].name = "rx_out_of_buffer"
+ [BNXT_RE_OOB].name = "rx_out_of_buffer",
+ [BNXT_RE_TX_CNP].name = "tx_cnp_pkts",
+ [BNXT_RE_RX_CNP].name = "rx_cnp_pkts",
+ [BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts",
+ [BNXT_RE_PACING_RESCHED].name = "pacing_reschedule",
+ [BNXT_RE_PACING_CMPL].name = "pacing_complete",
+ [BNXT_RE_PACING_ALERT].name = "pacing_alerts",
+ [BNXT_RE_DB_FIFO_REG].name = "db_fifo_register",
};
static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
@@ -136,14 +161,22 @@ static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res;
stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req;
stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req;
+ stats->value[BNXT_RE_TX_ROCE_PKTS] = s->tx_roce_pkts;
+ stats->value[BNXT_RE_TX_ROCE_BYTES] = s->tx_roce_bytes;
stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req;
stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req;
stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res;
stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req;
stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req;
+ stats->value[BNXT_RE_RX_ROCE_PKTS] = s->rx_roce_pkts;
+ stats->value[BNXT_RE_RX_ROCE_BYTES] = s->rx_roce_bytes;
stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts;
stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes;
stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer;
+ stats->value[BNXT_RE_TX_CNP] = s->tx_cnp;
+ stats->value[BNXT_RE_RX_CNP] = s->rx_cnp;
+ stats->value[BNXT_RE_RX_ECN] = s->rx_ecn_marked;
+ stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = s->rx_out_of_sequence;
}
static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev,
@@ -249,30 +282,59 @@ static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
err_s->res_oos_drop_count;
}
+static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats)
+{
+ struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;
+
+ stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
+ stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
+ stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
+ stats->value[BNXT_RE_DB_FIFO_REG] =
+ readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+}
+
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u32 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct ctx_hw_stats *hw_stats = NULL;
+ struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
struct bnxt_qplib_roce_stats *err_s = NULL;
+ struct ctx_hw_stats *hw_stats = NULL;
int rc = 0;
hw_stats = rdev->qplib_ctx.stats.dma;
if (!port || !stats)
return -EINVAL;
- stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&rdev->qp_count);
- stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&rdev->srq_count);
- stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->cq_count);
- stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->mr_count);
- stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->mw_count);
- stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&rdev->pd_count);
- stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&rdev->ah_count);
+ stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
+ stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
+ stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
+ stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
+ stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
+ stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
+ stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
+ stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
+ stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
+ stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
+ stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
+ stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
+ stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
+ stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
+ stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
+ stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
+ stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
+ stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
+ stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);
if (hw_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(hw_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_TX_DISCARDS] =
+ le64_to_cpu(hw_stats->tx_discard_pkts);
+ stats->value[BNXT_RE_TX_ERRORS] =
+ le64_to_cpu(hw_stats->tx_error_pkts);
stats->value[BNXT_RE_RX_ERRORS] =
le64_to_cpu(hw_stats->rx_error_pkts);
stats->value[BNXT_RE_RX_DISCARDS] =
@@ -294,6 +356,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
&rdev->flags);
goto done;
}
+ bnxt_re_copy_err_stats(rdev, stats, err_s);
if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
!rdev->is_virtfn) {
rc = bnxt_re_get_ext_stat(rdev, stats);
@@ -303,7 +366,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
}
- bnxt_re_copy_err_stats(rdev, stats, err_s);
+ if (rdev->pacing.dbr_pacing)
+ bnxt_re_copy_db_pacing_stats(rdev, stats);
}
done:
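
The resource counters above pair each atomic active count with a high-water mark recorded at allocation time. A minimal, self-contained sketch of that pattern (standalone C11, hypothetical names, not the driver code itself):

#include <stdatomic.h>

struct res_cntr {
	atomic_uint count;       /* currently active objects */
	unsigned int watermark;  /* highest count ever observed */
};

/* On each successful allocation: bump the active count and, if it now
 * exceeds the recorded maximum, move the watermark up. The watermark is
 * never lowered, so it reports peak usage over the device's lifetime.
 */
static void res_cntr_inc(struct res_cntr *c)
{
	unsigned int active = atomic_fetch_add(&c->count, 1) + 1;

	if (active > c->watermark)
		c->watermark = active;
}

/* On each free, only the active count goes down. */
static void res_cntr_dec(struct res_cntr *c)
{
	atomic_fetch_sub(&c->count, 1);
}
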
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index 7943b2c393e4..e541b6f8ca9f 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -44,15 +44,29 @@ enum bnxt_re_hw_stats {
BNXT_RE_ACTIVE_PD,
BNXT_RE_ACTIVE_AH,
BNXT_RE_ACTIVE_QP,
+ BNXT_RE_ACTIVE_RC_QP,
+ BNXT_RE_ACTIVE_UD_QP,
BNXT_RE_ACTIVE_SRQ,
BNXT_RE_ACTIVE_CQ,
BNXT_RE_ACTIVE_MR,
BNXT_RE_ACTIVE_MW,
+ BNXT_RE_WATERMARK_PD,
+ BNXT_RE_WATERMARK_AH,
+ BNXT_RE_WATERMARK_QP,
+ BNXT_RE_WATERMARK_RC_QP,
+ BNXT_RE_WATERMARK_UD_QP,
+ BNXT_RE_WATERMARK_SRQ,
+ BNXT_RE_WATERMARK_CQ,
+ BNXT_RE_WATERMARK_MR,
+ BNXT_RE_WATERMARK_MW,
+ BNXT_RE_RESIZE_CQ_CNT,
BNXT_RE_RX_PKTS,
BNXT_RE_RX_BYTES,
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_TX_ERRORS,
+ BNXT_RE_TX_DISCARDS,
BNXT_RE_RX_ERRORS,
BNXT_RE_RX_DISCARDS,
BNXT_RE_TO_RETRANSMITS,
@@ -100,19 +114,58 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_READ_RES,
BNXT_RE_TX_WRITE_REQ,
BNXT_RE_TX_SEND_REQ,
+ BNXT_RE_TX_ROCE_PKTS,
+ BNXT_RE_TX_ROCE_BYTES,
BNXT_RE_RX_ATOMIC_REQ,
BNXT_RE_RX_READ_REQ,
BNXT_RE_RX_READ_RESP,
BNXT_RE_RX_WRITE_REQ,
BNXT_RE_RX_SEND_REQ,
+ BNXT_RE_RX_ROCE_PKTS,
+ BNXT_RE_RX_ROCE_BYTES,
BNXT_RE_RX_ROCE_GOOD_PKTS,
BNXT_RE_RX_ROCE_GOOD_BYTES,
BNXT_RE_OOB,
+ BNXT_RE_TX_CNP,
+ BNXT_RE_RX_CNP,
+ BNXT_RE_RX_ECN,
+ BNXT_RE_PACING_RESCHED,
+ BNXT_RE_PACING_CMPL,
+ BNXT_RE_PACING_ALERT,
+ BNXT_RE_DB_FIFO_REG,
BNXT_RE_NUM_EXT_COUNTERS
};
#define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1)
+struct bnxt_re_db_pacing_stats {
+ u64 resched;
+ u64 complete;
+ u64 alerts;
+};
+
+struct bnxt_re_res_cntrs {
+ atomic_t qp_count;
+ atomic_t rc_qp_count;
+ atomic_t ud_qp_count;
+ atomic_t cq_count;
+ atomic_t srq_count;
+ atomic_t mr_count;
+ atomic_t mw_count;
+ atomic_t ah_count;
+ atomic_t pd_count;
+ atomic_t resize_count;
+ u64 qp_watermark;
+ u64 rc_qp_watermark;
+ u64 ud_qp_watermark;
+ u64 cq_watermark;
+ u64 srq_watermark;
+ u64 mr_watermark;
+ u64 mw_watermark;
+ u64 ah_watermark;
+ u64 pd_watermark;
+};
+
struct bnxt_re_rstat {
struct bnxt_qplib_roce_stats errs;
struct bnxt_qplib_ext_stat ext_stat;
@@ -120,6 +173,8 @@ struct bnxt_re_rstat {
struct bnxt_re_stats {
struct bnxt_re_rstat rstat;
+ struct bnxt_re_res_cntrs res;
+ struct bnxt_re_db_pacing_stats pacing;
};
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 03cc45a5458d..faa88d12ee86 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -284,7 +284,7 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
int index, union ib_gid *gid)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- int rc = 0;
+ int rc;
/* Ignore port_num */
memset(gid, 0, sizeof(*gid));
@@ -565,6 +565,8 @@ bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
break;
case BNXT_RE_MMAP_UC_DB:
case BNXT_RE_MMAP_WC_DB:
+ case BNXT_RE_MMAP_DBR_BAR:
+ case BNXT_RE_MMAP_DBR_PAGE:
ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
&entry->rdma_entry, PAGE_SIZE);
break;
@@ -600,7 +602,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
&rdev->qplib_res.pd_tbl,
&pd->qplib_pd))
- atomic_dec(&rdev->pd_count);
+ atomic_dec(&rdev->stats.res.pd_count);
}
return 0;
}
@@ -613,10 +615,11 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_user_mmap_entry *entry = NULL;
+ u32 active_pds;
int rc = 0;
pd->rdev = rdev;
- if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
+ if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
rc = -ENOMEM;
goto fail;
@@ -663,7 +666,9 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
if (bnxt_re_create_fence_mr(pd))
ibdev_warn(&rdev->ibdev,
"Failed to create Fence-MR\n");
- atomic_inc(&rdev->pd_count);
+ active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
+ if (active_pds > rdev->stats.res.pd_watermark)
+ rdev->stats.res.pd_watermark = active_pds;
return 0;
dbfail:
@@ -679,7 +684,7 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
struct bnxt_re_dev *rdev = ah->rdev;
bool block = true;
- int rc = 0;
+ int rc;
block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
@@ -689,7 +694,7 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
else
goto fail;
}
- atomic_dec(&rdev->ah_count);
+ atomic_dec(&rdev->stats.res.ah_count);
fail:
return rc;
}
@@ -723,6 +728,7 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
const struct ib_gid_attr *sgid_attr;
struct bnxt_re_gid_ctx *ctx;
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
+ u32 active_ahs;
u8 nw_type;
int rc;
@@ -775,7 +781,9 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
wmb(); /* make sure cache is updated. */
spin_unlock_irqrestore(&uctx->sh_lock, flag);
}
- atomic_inc(&rdev->ah_count);
+ active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
+ if (active_ahs > rdev->stats.res.ah_watermark)
+ rdev->stats.res.ah_watermark = active_ahs;
return 0;
}
@@ -826,7 +834,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
struct bnxt_re_qp *gsi_sqp;
struct bnxt_re_ah *gsi_sah;
struct bnxt_re_dev *rdev;
- int rc = 0;
+ int rc;
rdev = qp->rdev;
gsi_sqp = rdev->gsi_ctx.gsi_sqp;
@@ -836,7 +844,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
bnxt_qplib_destroy_ah(&rdev->qplib_res,
&gsi_sah->qplib_ah,
true);
- atomic_dec(&rdev->ah_count);
+ atomic_dec(&rdev->stats.res.ah_count);
bnxt_qplib_clean_qp(&qp->qplib_qp);
ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
@@ -851,7 +859,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
mutex_lock(&rdev->qp_lock);
list_del(&gsi_sqp->list);
mutex_unlock(&rdev->qp_lock);
- atomic_dec(&rdev->qp_count);
+ atomic_dec(&rdev->stats.res.qp_count);
kfree(rdev->gsi_ctx.sqp_tbl);
kfree(gsi_sah);
@@ -901,7 +909,11 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
mutex_lock(&rdev->qp_lock);
list_del(&qp->list);
mutex_unlock(&rdev->qp_lock);
- atomic_dec(&rdev->qp_count);
+ atomic_dec(&rdev->stats.res.qp_count);
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
+ atomic_dec(&rdev->stats.res.rc_qp_count);
+ else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
+ atomic_dec(&rdev->stats.res.ud_qp_count);
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@@ -1095,7 +1107,7 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
"Failed to allocate HW AH for Shadow QP");
goto fail;
}
- atomic_inc(&rdev->ah_count);
+ atomic_inc(&rdev->stats.res.ah_count);
return ah;
@@ -1163,7 +1175,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
list_add_tail(&qp->list, &rdev->qp_list);
- atomic_inc(&rdev->qp_count);
+ atomic_inc(&rdev->stats.res.qp_count);
mutex_unlock(&rdev->qp_lock);
return qp;
fail:
@@ -1340,8 +1352,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
qplqp->pd = &pd->qplib_pd;
qplqp->qp_handle = (u64)qplqp;
qplqp->max_inline_data = init_attr->cap.max_inline_data;
- qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
- true : false);
+ qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
qptype = bnxt_re_init_qp_type(rdev, init_attr);
if (qptype < 0) {
rc = qptype;
@@ -1446,7 +1457,7 @@ static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
{
struct bnxt_re_dev *rdev;
struct bnxt_qplib_qp *qplqp;
- int rc = 0;
+ int rc;
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
@@ -1497,6 +1508,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ u32 active_qps;
int rc;
rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
@@ -1545,7 +1557,18 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
mutex_lock(&rdev->qp_lock);
list_add_tail(&qp->list, &rdev->qp_list);
mutex_unlock(&rdev->qp_lock);
- atomic_inc(&rdev->qp_count);
+ active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
+ if (active_qps > rdev->stats.res.qp_watermark)
+ rdev->stats.res.qp_watermark = active_qps;
+ if (qp_init_attr->qp_type == IB_QPT_RC) {
+ active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
+ if (active_qps > rdev->stats.res.rc_qp_watermark)
+ rdev->stats.res.rc_qp_watermark = active_qps;
+ } else if (qp_init_attr->qp_type == IB_QPT_UD) {
+ active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
+ if (active_qps > rdev->stats.res.ud_qp_watermark)
+ rdev->stats.res.ud_qp_watermark = active_qps;
+ }
return 0;
qp_destroy:
@@ -1648,7 +1671,7 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
nq = qplib_srq->cq->nq;
bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
ib_umem_release(srq->umem);
- atomic_dec(&rdev->srq_count);
+ atomic_dec(&rdev->stats.res.srq_count);
if (nq)
nq->budget--;
return 0;
@@ -1696,6 +1719,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
struct bnxt_re_srq *srq;
struct bnxt_re_pd *pd;
struct ib_pd *ib_pd;
+ u32 active_srqs;
int rc, entries;
ib_pd = ib_srq->pd;
@@ -1760,7 +1784,9 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
}
if (nq)
nq->budget++;
- atomic_inc(&rdev->srq_count);
+ active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
+ if (active_srqs > rdev->stats.res.srq_watermark)
+ rdev->stats.res.srq_watermark = active_srqs;
spin_lock_init(&srq->lock);
return 0;
@@ -1862,7 +1888,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
int qp_attr_mask)
{
struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
- int rc = 0;
+ int rc;
if (qp_attr_mask & IB_QP_STATE) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
@@ -2212,7 +2238,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
u8 ip_version = 0;
u16 vlan_id = 0xFFFF;
void *buf;
- int i, rc = 0;
+ int i, rc;
memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
@@ -2250,7 +2276,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
}
is_eth = true;
- is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
+ is_vlan = vlan_id && (vlan_id < 0x1000);
ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
ip_version, is_udp, 0, &qp->qp1_hdr);
@@ -2787,7 +2813,6 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_qplib_swqe wqe;
int rc = 0;
- memset(&wqe, 0, sizeof(wqe));
while (wr) {
/* House keeping */
memset(&wqe, 0, sizeof(wqe));
@@ -2886,7 +2911,7 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
ib_umem_release(cq->umem);
- atomic_dec(&rdev->cq_count);
+ atomic_dec(&rdev->stats.res.cq_count);
nq->budget--;
kfree(cq->cql);
return 0;
@@ -2902,6 +2927,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int cqe = attr->cqe;
struct bnxt_qplib_nq *nq = NULL;
unsigned int nq_alloc_cnt;
+ u32 active_cqs;
if (attr->flags)
return -EOPNOTSUPP;
@@ -2970,7 +2996,9 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->cq_period = cq->qplib_cq.period;
nq->budget++;
- atomic_inc(&rdev->cq_count);
+ active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
+ if (active_cqs > rdev->stats.res.cq_watermark)
+ rdev->stats.res.cq_watermark = active_cqs;
spin_lock_init(&cq->cq_lock);
if (udata) {
@@ -3083,6 +3111,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
}
cq->ib_cq.cqe = cq->resize_cqe;
+ atomic_inc(&rdev->stats.res.resize_count);
return 0;
@@ -3319,26 +3348,21 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
struct bnxt_re_dev *rdev = gsi_qp->rdev;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ dma_addr_t shrq_hdr_buf_map;
+ struct ib_sge s_sge[2] = {};
+ struct ib_sge r_sge[2] = {};
struct bnxt_re_ah *gsi_sah;
+ struct ib_recv_wr rwr = {};
+ dma_addr_t rq_hdr_buf_map;
+ struct ib_ud_wr udwr = {};
struct ib_send_wr *swr;
- struct ib_ud_wr udwr;
- struct ib_recv_wr rwr;
+ u32 skip_bytes = 0;
int pkt_type = 0;
- u32 tbl_idx;
void *rq_hdr_buf;
- dma_addr_t rq_hdr_buf_map;
- dma_addr_t shrq_hdr_buf_map;
u32 offset = 0;
- u32 skip_bytes = 0;
- struct ib_sge s_sge[2];
- struct ib_sge r_sge[2];
+ u32 tbl_idx;
int rc;
- memset(&udwr, 0, sizeof(udwr));
- memset(&rwr, 0, sizeof(rwr));
- memset(&s_sge, 0, sizeof(s_sge));
- memset(&r_sge, 0, sizeof(r_sge));
-
swr = &udwr.wr;
tbl_idx = cqe->wr_id;
@@ -3578,7 +3602,7 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
unsigned long flags;
- int rc = 0;
+ int rc;
spin_lock_irqsave(&qp->sq_lock, flags);
@@ -3768,6 +3792,7 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
+ u32 active_mrs;
int rc;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
@@ -3795,7 +3820,9 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_ATOMIC))
mr->ib_mr.rkey = mr->ib_mr.lkey;
- atomic_inc(&rdev->mr_count);
+ active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+ if (active_mrs > rdev->stats.res.mr_watermark)
+ rdev->stats.res.mr_watermark = active_mrs;
return &mr->ib_mr;
@@ -3828,7 +3855,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
ib_umem_release(mr->ib_umem);
kfree(mr);
- atomic_dec(&rdev->mr_count);
+ atomic_dec(&rdev->stats.res.mr_count);
return rc;
}
@@ -3858,6 +3885,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr = NULL;
+ u32 active_mrs;
int rc;
if (type != IB_MR_TYPE_MEM_REG) {
@@ -3896,7 +3924,9 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
goto fail_mr;
}
- atomic_inc(&rdev->mr_count);
+ active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+ if (active_mrs > rdev->stats.res.mr_watermark)
+ rdev->stats.res.mr_watermark = active_mrs;
return &mr->ib_mr;
fail_mr:
@@ -3914,6 +3944,7 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mw *mw;
+ u32 active_mws;
int rc;
mw = kzalloc(sizeof(*mw), GFP_KERNEL);
@@ -3932,7 +3963,9 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
}
mw->ib_mw.rkey = mw->qplib_mw.rkey;
- atomic_inc(&rdev->mw_count);
+ active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
+ if (active_mws > rdev->stats.res.mw_watermark)
+ rdev->stats.res.mw_watermark = active_mws;
return &mw->ib_mw;
fail:
@@ -3953,21 +3986,19 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
}
kfree(mw);
- atomic_dec(&rdev->mw_count);
+ atomic_dec(&rdev->stats.res.mw_count);
return rc;
}
-/* uverbs */
-struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata)
+static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_umem *umem)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_re_mr *mr;
- struct ib_umem *umem;
unsigned long page_size;
+ struct bnxt_re_mr *mr;
int umem_pgs, rc;
+ u32 active_mrs;
if (length > BNXT_RE_MAX_MR_SIZE) {
ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
@@ -3975,6 +4006,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
return ERR_PTR(-ENOMEM);
}
+ page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
+ if (!page_size) {
+ ibdev_err(&rdev->ibdev, "umem page size unsupported!");
+ return ERR_PTR(-EINVAL);
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -3986,45 +4023,33 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to allocate MR");
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+ rc = -EIO;
goto free_mr;
}
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
- umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
- if (IS_ERR(umem)) {
- ibdev_err(&rdev->ibdev, "Failed to get umem");
- rc = -EFAULT;
- goto free_mrw;
- }
mr->ib_umem = umem;
-
mr->qplib_mr.va = virt_addr;
- page_size = ib_umem_find_best_pgsz(
- umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
- if (!page_size) {
- ibdev_err(&rdev->ibdev, "umem page size unsupported!");
- rc = -EFAULT;
- goto free_umem;
- }
mr->qplib_mr.total_size = length;
umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
umem_pgs, page_size);
if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to register user MR");
- goto free_umem;
+ ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
+ rc = -EIO;
+ goto free_mrw;
}
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->qplib_mr.lkey;
- atomic_inc(&rdev->mr_count);
+ active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+ if (active_mrs > rdev->stats.res.mr_watermark)
+ rdev->stats.res.mr_watermark = active_mrs;
return &mr->ib_mr;
-free_umem:
- ib_umem_release(umem);
+
free_mrw:
bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
@@ -4032,6 +4057,48 @@ free_mr:
return ERR_PTR(rc);
}
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct ib_umem *umem;
+ struct ib_mr *ib_mr;
+
+ umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
+ if (IS_ERR(umem))
+ return ERR_CAST(umem);
+
+ ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+ if (IS_ERR(ib_mr))
+ ib_umem_release(umem);
+ return ib_mr;
+}
+
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+ u64 length, u64 virt_addr, int fd,
+ int mr_access_flags, struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ib_umem *umem;
+ struct ib_mr *ib_mr;
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
+ fd, mr_access_flags);
+ if (IS_ERR(umem_dmabuf))
+ return ERR_CAST(umem_dmabuf);
+
+ umem = &umem_dmabuf->umem;
+
+ ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+ if (IS_ERR(ib_mr))
+ ib_umem_release(umem);
+ return ib_mr;
+}
+
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{
struct ib_device *ibdev = ctx->device;
@@ -4087,6 +4154,8 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
goto cfail;
}
uctx->shpage_mmap = &entry->rdma_entry;
+ if (rdev->pacing.dbr_pacing)
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (rc) {
@@ -4159,6 +4228,19 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
case BNXT_RE_MMAP_SH_PAGE:
ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
break;
+ case BNXT_RE_MMAP_DBR_BAR:
+ pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+ ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case BNXT_RE_MMAP_DBR_PAGE:
+ /* Driver doesn't expect write access from user space */
+ if (vma->vm_flags & VM_WRITE)
+ return -EFAULT;
+ ret = vm_insert_page(vma, vma->vm_start,
+ virt_to_page((void *)bnxt_entry->mem_offset));
+ break;
default:
ret = -EINVAL;
break;
@@ -4178,6 +4260,15 @@ void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
kfree(bnxt_entry);
}
+static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_ucontext *uctx;
+
+ uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
+ bnxt_re_pacing_alert(uctx->rdev);
+ return 0;
+}
+
static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
@@ -4190,7 +4281,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
u64 mmap_offset;
u32 length;
u32 dpi;
- u64 dbr;
+ u64 addr;
int err;
uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
@@ -4212,19 +4303,30 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
return -ENOMEM;
length = PAGE_SIZE;
dpi = uctx->wcdpi.dpi;
- dbr = (u64)uctx->wcdpi.umdbr;
+ addr = (u64)uctx->wcdpi.umdbr;
mmap_flag = BNXT_RE_MMAP_WC_DB;
} else {
return -EINVAL;
}
break;
+ case BNXT_RE_ALLOC_DBR_BAR_PAGE:
+ length = PAGE_SIZE;
+ addr = (u64)rdev->pacing.dbr_bar_addr;
+ mmap_flag = BNXT_RE_MMAP_DBR_BAR;
+ break;
+
+ case BNXT_RE_ALLOC_DBR_PAGE:
+ length = PAGE_SIZE;
+ addr = (u64)rdev->pacing.dbr_page;
+ mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
+ break;
default:
return -EOPNOTSUPP;
}
- entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
+ entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
if (!entry)
return -ENOMEM;
@@ -4264,6 +4366,9 @@ static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
uctx->wcdpi.dbr = NULL;
}
break;
+ case BNXT_RE_MMAP_DBR_BAR:
+ case BNXT_RE_MMAP_DBR_PAGE:
+ break;
default:
goto exit;
}
@@ -4301,7 +4406,13 @@ DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
&UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
&UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
+
+DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
+ &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
+
const struct uapi_definition bnxt_re_uapi_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
{}
};
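
For context, the reg_user_mr_dmabuf entry point added above is what a dma-buf registration request from user space ultimately reaches. A hedged user-space sketch, assuming libibverbs' ibv_reg_dmabuf_mr() verb and a dma-buf fd obtained from some exporter (names and values here are illustrative, not part of this patch):

#include <infiniband/verbs.h>
#include <stddef.h>

/* Illustrative only: register a dma-buf backed MR. The dma-buf fd is
 * assumed to come from another subsystem (GPU, media, etc.); offset 0
 * maps the buffer from its start, and iova 0 makes addresses used in
 * work requests relative to the start of the registration.
 */
static struct ibv_mr *register_dmabuf_mr(struct ibv_pd *pd, int dmabuf_fd,
					 size_t length)
{
	return ibv_reg_dmabuf_mr(pd, 0 /* offset */, length, 0 /* iova */,
				 dmabuf_fd,
				 IBV_ACCESS_LOCAL_WRITE |
				 IBV_ACCESS_REMOTE_READ |
				 IBV_ACCESS_REMOTE_WRITE);
}
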
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 32d9e9d09791..84715b7e7a4e 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -146,6 +146,8 @@ enum bnxt_re_mmap_flag {
BNXT_RE_MMAP_SH_PAGE,
BNXT_RE_MMAP_UC_DB,
BNXT_RE_MMAP_WC_DB,
+ BNXT_RE_MMAP_DBR_PAGE,
+ BNXT_RE_MMAP_DBR_BAR,
};
struct bnxt_re_user_mmap_entry {
@@ -227,6 +229,10 @@ int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int mr_access_flags,
+ struct ib_udata *udata);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 63e98e2d3596..c9066aade412 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -360,7 +360,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
- int rc = 0;
+ int rc;
en_dev = rdev->en_dev;
@@ -395,10 +395,9 @@ static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_func_qcfg_output resp = {0};
struct hwrm_func_qcfg_input req = {0};
- struct bnxt_fw_msg fw_msg;
+ struct bnxt_fw_msg fw_msg = {};
int rc;
- memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
@@ -432,9 +431,219 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
return rc;
cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
+ cctx->modes.dbr_pacing =
+ le32_to_cpu(resp.flags_ext2) &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED;
+ return 0;
+}
+
+static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
+{
+ struct hwrm_func_dbr_pacing_qcfg_output resp = {};
+ struct hwrm_func_dbr_pacing_qcfg_input req = {};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ cctx = rdev->chip_ctx;
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ return rc;
+
+ if ((le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
+ FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) ==
+ FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC)
+ cctx->dbr_stat_db_fifo =
+ le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
+ ~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
+ return 0;
+}
+
+/* Update the pacing tunable parameters to the default values */
+static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+
+ pacing_data->do_pacing = rdev->pacing.dbr_def_do_pacing;
+ pacing_data->pacing_th = rdev->pacing.pacing_algo_th;
+ pacing_data->alarm_th =
+ pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
+}
+
+static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
+{
+ u32 read_val, fifo_occup;
+
+ /* The loop shouldn't run infinitely, as the occupancy usually goes
+ * below the pacing algo threshold as soon as pacing kicks in.
+ */
+ while (1) {
+ read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+ fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
+ ((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
+ BNXT_RE_DB_FIFO_ROOM_SHIFT);
+ /* FIFO occupancy cannot be greater than the MAX FIFO depth */
+ if (fifo_occup > BNXT_RE_MAX_FIFO_DEPTH)
+ break;
+
+ if (fifo_occup < rdev->qplib_res.pacing_data->pacing_th)
+ break;
+ }
+}
+
+static void bnxt_re_db_fifo_check(struct work_struct *work)
+{
+ struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
+ dbq_fifo_check_work);
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+ u32 pacing_save;
+
+ if (!mutex_trylock(&rdev->pacing.dbq_lock))
+ return;
+ pacing_data = rdev->qplib_res.pacing_data;
+ pacing_save = rdev->pacing.do_pacing_save;
+ __wait_for_fifo_occupancy_below_th(rdev);
+ cancel_delayed_work_sync(&rdev->dbq_pacing_work);
+ if (pacing_save > rdev->pacing.dbr_def_do_pacing) {
+ /* Double the do_pacing value during the congestion */
+ pacing_save = pacing_save << 1;
+ } else {
+ /*
+ * When new congestion is detected, increase do_pacing by 8 times
+ * and also increase the pacing_th by 4 times. The reason to increase
+ * pacing_th is to give the queue more room to drain without going
+ * empty, while also leaving more room for it to grow without
+ * raising another alarm.
+ */
+ pacing_save = pacing_save << 3;
+ pacing_data->pacing_th = rdev->pacing.pacing_algo_th * 4;
+ }
+
+ if (pacing_save > BNXT_RE_MAX_DBR_DO_PACING)
+ pacing_save = BNXT_RE_MAX_DBR_DO_PACING;
+
+ pacing_data->do_pacing = pacing_save;
+ rdev->pacing.do_pacing_save = pacing_data->do_pacing;
+ pacing_data->alarm_th =
+ pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
+ schedule_delayed_work(&rdev->dbq_pacing_work,
+ msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
+ rdev->stats.pacing.alerts++;
+ mutex_unlock(&rdev->pacing.dbq_lock);
+}
+
+static void bnxt_re_pacing_timer_exp(struct work_struct *work)
+{
+ struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
+ dbq_pacing_work.work);
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+ u32 read_val, fifo_occup;
+
+ if (!mutex_trylock(&rdev->pacing.dbq_lock))
+ return;
+
+ pacing_data = rdev->qplib_res.pacing_data;
+ read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+ fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
+ ((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
+ BNXT_RE_DB_FIFO_ROOM_SHIFT);
+
+ if (fifo_occup > pacing_data->pacing_th)
+ goto restart_timer;
+
+ /*
+ * Instead of immediately going back to the default do_pacing,
+ * reduce it by 1/8 and restart the timer.
+ */
+ pacing_data->do_pacing = pacing_data->do_pacing - (pacing_data->do_pacing >> 3);
+ pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing, pacing_data->do_pacing);
+ if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) {
+ bnxt_re_set_default_pacing_data(rdev);
+ rdev->stats.pacing.complete++;
+ goto dbq_unlock;
+ }
+
+restart_timer:
+ schedule_delayed_work(&rdev->dbq_pacing_work,
+ msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
+ rdev->stats.pacing.resched++;
+dbq_unlock:
+ rdev->pacing.do_pacing_save = pacing_data->do_pacing;
+ mutex_unlock(&rdev->pacing.dbq_lock);
+}
+
+void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+
+ if (!rdev->pacing.dbr_pacing)
+ return;
+ mutex_lock(&rdev->pacing.dbq_lock);
+ pacing_data = rdev->qplib_res.pacing_data;
+
+ /*
+ * Increase the alarm_th to max so that other user lib instances do not
+ * keep alerting the driver.
+ */
+ pacing_data->alarm_th = BNXT_RE_MAX_FIFO_DEPTH;
+ pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING;
+ cancel_work_sync(&rdev->dbq_fifo_check_work);
+ schedule_work(&rdev->dbq_fifo_check_work);
+ mutex_unlock(&rdev->pacing.dbq_lock);
+}
+
+static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev))
+ return -EIO;
+
+ /* Allocate a page for app use */
+ rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!rdev->pacing.dbr_page)
+ return -ENOMEM;
+
+ memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE);
+ rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page;
+
+ /* Map HW window 2 for reading the DB FIFO depth */
+ writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK,
+ rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
+ rdev->pacing.dbr_db_fifo_reg_off =
+ (rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) +
+ BNXT_RE_GRC_FIFO_REG_BASE;
+ rdev->pacing.dbr_bar_addr =
+ pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off;
+
+ rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
+ rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME;
+ rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
+ rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing;
+ rdev->qplib_res.pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH;
+ rdev->qplib_res.pacing_data->fifo_room_mask = BNXT_RE_DB_FIFO_ROOM_MASK;
+ rdev->qplib_res.pacing_data->fifo_room_shift = BNXT_RE_DB_FIFO_ROOM_SHIFT;
+ rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off;
+ bnxt_re_set_default_pacing_data(rdev);
+ /* Initialize worker for DBR Pacing */
+ INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check);
+ INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp);
return 0;
}
+static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ cancel_work_sync(&rdev->dbq_fifo_check_work);
+ cancel_delayed_work_sync(&rdev->dbq_pacing_work);
+ if (rdev->pacing.dbr_page)
+ free_page((u64)rdev->pacing.dbr_page);
+
+ rdev->pacing.dbr_page = NULL;
+ rdev->pacing.dbr_pacing = false;
+}
+
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
u16 fw_ring_id, int type)
{
@@ -652,6 +861,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.query_qp = bnxt_re_query_qp,
.query_srq = bnxt_re_query_srq,
.reg_user_mr = bnxt_re_reg_user_mr,
+ .reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
.req_notify_cq = bnxt_re_req_notify_cq,
.resize_cq = bnxt_re_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
@@ -711,13 +921,14 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
rdev->id = rdev->en_dev->pdev->devfn;
INIT_LIST_HEAD(&rdev->qp_list);
mutex_init(&rdev->qp_lock);
- atomic_set(&rdev->qp_count, 0);
- atomic_set(&rdev->cq_count, 0);
- atomic_set(&rdev->srq_count, 0);
- atomic_set(&rdev->mr_count, 0);
- atomic_set(&rdev->mw_count, 0);
- atomic_set(&rdev->ah_count, 0);
- atomic_set(&rdev->pd_count, 0);
+ mutex_init(&rdev->pacing.dbq_lock);
+ atomic_set(&rdev->stats.res.qp_count, 0);
+ atomic_set(&rdev->stats.res.cq_count, 0);
+ atomic_set(&rdev->stats.res.srq_count, 0);
+ atomic_set(&rdev->stats.res.mr_count, 0);
+ atomic_set(&rdev->stats.res.mw_count, 0);
+ atomic_set(&rdev->stats.res.ah_count, 0);
+ atomic_set(&rdev->stats.res.pd_count, 0);
rdev->cosq[0] = 0xFFFF;
rdev->cosq[1] = 0xFFFF;
@@ -759,7 +970,7 @@ static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
struct bnxt_re_qp *qp)
{
- struct ib_event event;
+ struct ib_event event = {};
unsigned int flags;
if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
@@ -769,7 +980,6 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
bnxt_re_unlock_cqs(qp, flags);
}
- memset(&event, 0, sizeof(event));
if (qp->qplib_qp.srq) {
event.device = &qp->rdev->ibdev;
event.element.qp = &qp->ib_qp;
@@ -937,13 +1147,12 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
struct bnxt_re_ring_attr rattr = {};
int num_vec_created = 0;
- int rc = 0, i;
+ int rc, i;
u8 type;
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
- rdev->is_virtfn);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
if (rc)
goto fail;
@@ -1090,11 +1299,10 @@ static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
u32 prio_map = 0, tmp_map = 0;
struct net_device *netdev;
- struct dcb_app app;
+ struct dcb_app app = {};
netdev = rdev->netdev;
- memset(&app, 0, sizeof(app));
app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
app.protocol = ETH_P_IBOE;
tmp_map = dcb_ieee_getapp_mask(netdev, &app);
@@ -1123,8 +1331,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
*/
if ((prio_map == 0 && rdev->qplib_res.prio) ||
(prio_map != 0 && !rdev->qplib_res.prio)) {
- rdev->qplib_res.prio = prio_map ? true : false;
-
+ rdev->qplib_res.prio = prio_map;
bnxt_re_update_gid(rdev);
}
@@ -1138,7 +1345,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
struct hwrm_ver_get_input req = {};
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_fw_msg fw_msg = {};
- int rc = 0;
+ int rc;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VER_GET);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
@@ -1168,7 +1375,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
- int rc = 0;
+ int rc;
u32 event;
/* Register ib dev */
@@ -1214,8 +1421,11 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
- if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
- rdev->num_msix = 0;
+
+ rdev->num_msix = 0;
+
+ if (rdev->pacing.dbr_pacing)
+ bnxt_re_deinitialize_dbr_pacing(rdev);
bnxt_re_destroy_chip_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
@@ -1234,15 +1444,14 @@ static void bnxt_re_worker(struct work_struct *work)
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
+ struct bnxt_re_ring_attr rattr = {};
struct bnxt_qplib_creq_ctx *creq;
- struct bnxt_re_ring_attr rattr;
u32 db_offt;
int vid;
u8 type;
int rc;
/* Registered a new RoCE device instance to netdev */
- memset(&rattr, 0, sizeof(rattr));
rc = bnxt_re_register_netdev(rdev);
if (rc) {
ibdev_err(&rdev->ibdev,
@@ -1271,7 +1480,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
rdev->en_dev->ulp_tbl->msix_requested);
rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
- set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
bnxt_re_query_hwrm_intf_version(rdev);
@@ -1311,8 +1519,17 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
goto free_ring;
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
- rdev->is_virtfn);
+ if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) {
+ rc = bnxt_re_initialize_dbr_pacing(rdev);
+ if (!rc) {
+ rdev->pacing.dbr_pacing = true;
+ } else {
+ ibdev_err(&rdev->ibdev,
+ "DBR pacing disabled with error : %d\n", rc);
+ rdev->pacing.dbr_pacing = false;
+ }
+ }
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
if (rc)
goto disable_rcfw;
@@ -1400,7 +1617,7 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
container_of(adev, struct bnxt_aux_priv, aux_dev);
struct bnxt_en_dev *en_dev;
struct bnxt_re_dev *rdev;
- int rc = 0;
+ int rc;
/* en_dev should never be NULL as long as adev and aux_dev are valid. */
en_dev = aux_priv->edev;
@@ -1646,7 +1863,7 @@ static struct auxiliary_driver bnxt_re_driver = {
static int __init bnxt_re_mod_init(void)
{
- int rc = 0;
+ int rc;
pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
rc = auxiliary_driver_register(&bnxt_re_driver);
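
The doorbell (DBR) pacing logic added to main.c above adjusts a do_pacing value in two directions: sharply upward when the DB FIFO congests, and back toward the default by 1/8 per timer tick once the FIFO drains. A minimal sketch of that arithmetic (standalone C with a hypothetical upper bound, not the driver code):

#include <stdint.h>

#define MAX_DO_PACING 0xFFFFu   /* hypothetical upper bound */

/* Congestion path: if we were already pacing beyond the default,
 * double the value; for newly detected congestion, jump by 8x and
 * widen the pacing threshold by 4x, then clamp to the maximum.
 */
static uint32_t pacing_on_congestion(uint32_t cur, uint32_t def,
				     uint32_t *pacing_th, uint32_t algo_th)
{
	uint32_t next;

	if (cur > def) {
		next = cur << 1;
	} else {
		next = cur << 3;
		*pacing_th = algo_th * 4;
	}
	return next > MAX_DO_PACING ? MAX_DO_PACING : next;
}

/* Timer path: once the FIFO occupancy is back under the threshold,
 * decay by 1/8 per tick and floor at the default value.
 */
static uint32_t pacing_on_timer_expiry(uint32_t cur, uint32_t def)
{
	uint32_t next = cur - (cur >> 3);

	return next < def ? def : next;
}
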
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index a42555623aed..abbabea7f5fa 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -535,7 +535,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
cqn_handler_t cqn_handler,
srqn_handler_t srqn_handler)
{
- int rc = -1;
+ int rc;
nq->pdev = pdev;
nq->cqn_handler = cqn_handler;
@@ -727,27 +727,30 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_query_srq_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
struct creq_query_srq_resp_sb *sb;
struct cmdq_query_srq req = {};
- int rc = 0;
+ int rc;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_SRQ,
sizeof(req));
/* Configure the request */
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf)
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
req.srq_cid = cpu_to_le32(srq->id);
- sb = sbuf->sb;
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ sb = sbuf.sb;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
srq->threshold = le16_to_cpu(sb->srq_limit);
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
@@ -1365,24 +1368,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_query_qp_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
struct creq_query_qp_resp_sb *sb;
struct cmdq_query_qp req = {};
u32 temp32[4];
- int i, rc = 0;
+ int i, rc;
+
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ sb = sbuf.sb;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_QP,
sizeof(req));
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf)
- return -ENOMEM;
- sb = sbuf->sb;
-
req.qp_cid = cpu_to_le32(qp->id);
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
@@ -1391,8 +1396,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->state = sb->en_sqd_async_notify_state &
CREQ_QUERY_QP_RESP_SB_STATE_MASK;
qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
- CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
- true : false;
+ CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
qp->access = sb->access;
qp->pkey_index = le16_to_cpu(sb->pkey);
qp->qkey = le32_to_cpu(sb->qkey);
@@ -1442,7 +1446,8 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index bc3aea4592b9..e47b4ca64d33 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -55,7 +55,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t);
/**
* bnxt_qplib_map_rc - map return type based on opcode
- * @opcode - roce slow path opcode
+ * @opcode: roce slow path opcode
*
* case #1
* Firmware initiated error recovery is a safe state machine and
@@ -98,8 +98,8 @@ static int bnxt_qplib_map_rc(u8 opcode)
/**
* bnxt_re_is_fw_stalled - Check firmware health
- * @rcfw - rcfw channel instance of rdev
- * @cookie - cookie to track the command
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
*
* If firmware has not responded any rcfw command within
* rcfw->max_timeout, consider firmware as stalled.
@@ -133,8 +133,8 @@ static int bnxt_re_is_fw_stalled(struct bnxt_qplib_rcfw *rcfw,
/**
* __wait_for_resp - Don't hold the cpu context and wait for response
- * @rcfw - rcfw channel instance of rdev
- * @cookie - cookie to track the command
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
*
* Wait for command completion in sleepable context.
*
@@ -179,8 +179,8 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
/**
* __block_for_resp - hold the cpu context and wait for response
- * @rcfw - rcfw channel instance of rdev
- * @cookie - cookie to track the command
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
*
* This function will hold the cpu (non-sleepable context) and
* wait for command completion. The maximum holding interval is 8 seconds.
@@ -216,8 +216,8 @@ static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
};
/* __send_message_no_waiter - get cookie and post the message.
- * @rcfw - rcfw channel instance of rdev
- * @msg - qplib message internal
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: qplib message internal
*
* This function will just post and not bother about completion.
* Current design of this function is -
@@ -335,7 +335,8 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
cpu_to_le64(sbuf->dma_addr));
__set_cmdq_base_resp_size(msg->req, msg->req_sz,
ALIGN(sbuf->size,
- BNXT_QPLIB_CMDQE_UNITS));
+ BNXT_QPLIB_CMDQE_UNITS) /
+ BNXT_QPLIB_CMDQE_UNITS);
}
preq = (u8 *)msg->req;
@@ -373,8 +374,8 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
/**
* __poll_for_resp - self poll completion for rcfw command
- * @rcfw - rcfw channel instance of rdev
- * @cookie - cookie to track the command
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
*
* It works the same as __wait_for_resp, except this function will
* do self polling in short intervals since the interrupt is disabled.
@@ -470,8 +471,8 @@ static void __destroy_timedout_ah(struct bnxt_qplib_rcfw *rcfw,
/**
* __bnxt_qplib_rcfw_send_message - qplib interface to send
* and complete rcfw command.
- * @rcfw - rcfw channel instance of rdev
- * @msg - qplib message internal
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: qplib message internal
*
* This function does not account for shadow queue depth. It will send
* all the commands unconditionally as long as the send queue is not full.
@@ -487,7 +488,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_crsqe *crsqe;
unsigned long flags;
u16 cookie;
- int rc = 0;
+ int rc;
u8 opcode;
opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
@@ -533,8 +534,8 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/**
* bnxt_qplib_rcfw_send_message - qplib interface to send
* and complete rcfw command.
- * @rcfw - rcfw channel instance of rdev
- * @msg - qplib message internal
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: qplib message internal
*
* The driver interacts with Firmware through the rcfw channel/slow path in two ways.
* a. Blocking rcfw command send. In this path, driver cannot hold
@@ -664,7 +665,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE;
crsqe = &rcfw->crsqe_tbl[cookie];
- crsqe->is_in_used = false;
if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
&rcfw->cmdq.flags),
@@ -680,8 +680,14 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
atomic_dec(&rcfw->timeout_send);
if (crsqe->is_waiter_alive) {
- if (crsqe->resp)
+ if (crsqe->resp) {
memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
+ /* Insert write memory barrier to ensure that
+ * response data is copied before clearing the
+ * flags
+ */
+ smp_wmb();
+ }
if (!blocked)
wait_cmds++;
}
@@ -693,6 +699,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
if (!is_waiter_alive)
crsqe->resp = NULL;
+ crsqe->is_in_used = false;
+
hwq->cons += req_size;
/* This is a case to handle below scenario -
@@ -1195,34 +1203,3 @@ int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
return 0;
}
-
-struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
- struct bnxt_qplib_rcfw *rcfw,
- u32 size)
-{
- struct bnxt_qplib_rcfw_sbuf *sbuf;
-
- sbuf = kzalloc(sizeof(*sbuf), GFP_KERNEL);
- if (!sbuf)
- return NULL;
-
- sbuf->size = size;
- sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
- &sbuf->dma_addr, GFP_KERNEL);
- if (!sbuf->sb)
- goto bail;
-
- return sbuf;
-bail:
- kfree(sbuf);
- return NULL;
-}
-
-void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_rcfw_sbuf *sbuf)
-{
- if (sbuf->sb)
- dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
- sbuf->sb, sbuf->dma_addr);
- kfree(sbuf);
-}
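
The smp_wmb() added in bnxt_qplib_process_qp_event() above exists so the copied response is visible before the waiter can observe the command slot as free. A minimal sketch of the same publish/consume ordering, expressed with C11 release/acquire semantics rather than the kernel's barrier primitives (illustrative only, hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>

struct slot {
	int resp;            /* response payload                  */
	atomic_bool in_use;  /* true while the command is pending */
};

/* Producer: publish the payload, then release the slot. The release
 * store guarantees the payload write is visible before the flag flip.
 */
static void producer_complete(struct slot *s, int resp)
{
	s->resp = resp;
	atomic_store_explicit(&s->in_use, false, memory_order_release);
}

/* Consumer: only read the payload after observing the slot as free.
 * The acquire load pairs with the release store above.
 */
static bool consumer_poll(struct slot *s, int *resp)
{
	if (atomic_load_explicit(&s->in_use, memory_order_acquire))
		return false;   /* still pending */
	*resp = s->resp;
	return true;
}
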
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 739d942761d1..157db6b7e119 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -118,11 +118,11 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
else
pages = sginfo->npages;
/* page ptr arrays */
- pbl->pg_arr = vmalloc(pages * sizeof(void *));
+ pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
if (!pbl->pg_arr)
return -ENOMEM;
- pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
+ pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
if (!pbl->pg_map_arr) {
vfree(pbl->pg_arr);
pbl->pg_arr = NULL;
@@ -385,7 +385,7 @@ static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
struct bnxt_qplib_tqm_ctx *tqmctx;
- int rc = 0;
+ int rc;
int i;
tqmctx = &ctx->tqm_ctx;
@@ -463,7 +463,7 @@ static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx)
{
- int rc = 0;
+ int rc;
rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
if (rc)
@@ -501,7 +501,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
- int rc = 0;
+ int rc;
if (virt_fn || is_p5)
goto stats_alloc;
@@ -642,31 +642,44 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
/* PDs */
-int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, struct bnxt_qplib_pd *pd)
{
+ struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl;
u32 bit_num;
+ int rc = 0;
+ mutex_lock(&res->pd_tbl_lock);
bit_num = find_first_bit(pdt->tbl, pdt->max);
- if (bit_num == pdt->max)
- return -ENOMEM;
+ if (bit_num == pdt->max) {
+ rc = -ENOMEM;
+ goto exit;
+ }
/* Found unused PD */
clear_bit(bit_num, pdt->tbl);
pd->id = bit_num;
- return 0;
+exit:
+ mutex_unlock(&res->pd_tbl_lock);
+ return rc;
}
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd_tbl *pdt,
struct bnxt_qplib_pd *pd)
{
+ int rc = 0;
+
+ mutex_lock(&res->pd_tbl_lock);
if (test_and_set_bit(pd->id, pdt->tbl)) {
dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
pd->id);
- return -EINVAL;
+ rc = -EINVAL;
+ goto exit;
}
pd->id = 0;
- return 0;
+exit:
+ mutex_unlock(&res->pd_tbl_lock);
+ return rc;
}
static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
@@ -691,6 +704,7 @@ static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
pdt->max = max;
memset((u8 *)pdt->tbl, 0xFF, bytes);
+ mutex_init(&res->pd_tbl_lock);
return 0;
}
@@ -877,7 +891,7 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
struct net_device *netdev,
struct bnxt_qplib_dev_attr *dev_attr)
{
- int rc = 0;
+ int rc;
res->pdev = pdev;
res->netdev = netdev;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index d850a553821e..5949f004f785 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -48,6 +48,7 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
struct bnxt_qplib_drv_modes {
u8 wqe_mode;
bool db_push;
+ bool dbr_pacing;
};
struct bnxt_qplib_chip_ctx {
@@ -58,6 +59,17 @@ struct bnxt_qplib_chip_ctx {
u16 hwrm_cmd_max_timeout;
struct bnxt_qplib_drv_modes modes;
u64 hwrm_intf_ver;
+ u32 dbr_stat_db_fifo;
+};
+
+struct bnxt_qplib_db_pacing_data {
+ u32 do_pacing;
+ u32 pacing_th;
+ u32 alarm_th;
+ u32 fifo_max_depth;
+ u32 fifo_room_mask;
+ u32 fifo_room_shift;
+ u32 grc_reg_offset;
};
#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
@@ -265,12 +277,15 @@ struct bnxt_qplib_res {
struct net_device *netdev;
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
+ /* To protect the pd table bit map */
+ struct mutex pd_tbl_lock;
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
/* To protect the dpi table bit map */
struct mutex dpi_tbl_lock;
bool prio;
bool is_vf;
+ struct bnxt_qplib_db_pacing_data *pacing_data;
};
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
@@ -355,7 +370,7 @@ void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
struct bnxt_qplib_hwq_attr *hwq_attr);
-int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd_tbl *pd_tbl,
@@ -467,4 +482,10 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
return dev_cap_flags &
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
+
+static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.dbr_pacing;
+}
+
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index ab45f9d4bb02..a27b68515164 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -72,7 +72,7 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
struct creq_query_version_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct cmdq_query_version req = {};
- int rc = 0;
+ int rc;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_VERSION,
@@ -89,31 +89,29 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
}
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr, bool vf)
+ struct bnxt_qplib_dev_attr *attr)
{
struct creq_query_func_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
struct cmdq_query_func req = {};
u8 *tqm_alloc;
- int i, rc = 0;
+ int i, rc;
u32 temp;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_FUNC,
sizeof(req));
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf) {
- dev_err(&rcfw->pdev->dev,
- "SP: QUERY_FUNC alloc side buffer failed\n");
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- }
-
- sb = sbuf->sb;
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ sb = sbuf.sb;
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
@@ -121,9 +119,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
- /* max_qp value reported by FW for PF doesn't include the QP1 for PF */
- if (!vf)
- attr->max_qp += 1;
+ /* max_qp value reported by FW doesn't include the QP1 */
+ attr->max_qp += 1;
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -175,7 +172,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
@@ -186,7 +184,7 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct creq_set_func_resources_resp resp = {};
struct cmdq_set_func_resources req = {};
struct bnxt_qplib_cmdqmsg msg = {};
- int rc = 0;
+ int rc;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
@@ -718,23 +716,22 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
struct creq_query_roce_stats_resp_sb *sb;
struct cmdq_query_roce_stats req = {};
struct bnxt_qplib_cmdqmsg msg = {};
- struct bnxt_qplib_rcfw_sbuf *sbuf;
- int rc = 0;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ int rc;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
sizeof(req));
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf) {
- dev_err(&rcfw->pdev->dev,
- "SP: QUERY_ROCE_STATS alloc side buffer failed\n");
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- }
+ sb = sbuf.sb;
- sb = sbuf->sb;
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
@@ -790,7 +787,8 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
}
bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
@@ -801,49 +799,56 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
struct creq_query_roce_stats_ext_resp_sb *sb;
struct cmdq_query_roce_stats_ext req = {};
struct bnxt_qplib_cmdqmsg msg = {};
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
int rc;
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf) {
- dev_err(&rcfw->pdev->dev,
- "SP: QUERY_ROCE_STATS_EXT alloc sb failed");
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- }
+ sb = sbuf.sb;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
sizeof(req));
- req.resp_size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
- req.resp_addr = cpu_to_le64(sbuf->dma_addr);
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ req.resp_addr = cpu_to_le64(sbuf.dma_addr);
req.function_id = cpu_to_le32(fid);
req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
goto bail;
- sb = sbuf->sb;
estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
+ estat->tx_roce_pkts = le64_to_cpu(sb->tx_roce_pkts);
+ estat->tx_roce_bytes = le64_to_cpu(sb->tx_roce_bytes);
estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
+ estat->rx_roce_pkts = le64_to_cpu(sb->rx_roce_pkts);
+ estat->rx_roce_bytes = le64_to_cpu(sb->rx_roce_bytes);
estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
+ estat->tx_cnp = le64_to_cpu(sb->tx_cnp_pkts);
+ estat->rx_cnp = le64_to_cpu(sb->rx_cnp_pkts);
+ estat->rx_ecn_marked = le64_to_cpu(sb->rx_ecn_marked_pkts);
bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
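Each of the three query paths above now uses the same on-stack side-buffer shape: size the response buffer to a multiple of BNXT_QPLIB_CMDQE_UNITS, back it with dma_alloc_coherent(), and release it unconditionally on the bail path. A minimal, generic sketch of that shape (everything except the DMA API calls is invented for the example):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	struct side_buf {
		void *sb;
		dma_addr_t dma_addr;
		u32 size;
	};

	static int query_with_side_buf(struct device *dev, size_t resp_len,
				       int (*send)(struct side_buf *sbuf))
	{
		struct side_buf sbuf;
		int rc;

		sbuf.size = ALIGN(resp_len, 16);	/* command unit size assumed */
		sbuf.sb = dma_alloc_coherent(dev, sbuf.size, &sbuf.dma_addr,
					     GFP_KERNEL);
		if (!sbuf.sb)
			return -ENOMEM;

		rc = send(&sbuf);	/* caller parses sbuf.sb on success */

		dma_free_coherent(dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
		return rc;
	}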
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 264ef3cedc45..d33c78b96217 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -322,7 +322,7 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr, bool vf);
+ struct bnxt_qplib_dev_attr *attr);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index ced615b5ea09..040ba2224f9f 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
req = __skb_put_zero(skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index ffbd9a89981e..d16d8eaa1415 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2466,7 +2466,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
- init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
+ init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 4e93ef7f84ee..9c65bd27bae0 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -66,6 +66,7 @@ enum efa_admin_get_stats_type {
EFA_ADMIN_GET_STATS_TYPE_BASIC = 0,
EFA_ADMIN_GET_STATS_TYPE_MESSAGES = 1,
EFA_ADMIN_GET_STATS_TYPE_RDMA_READ = 2,
+ EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE = 3,
};
enum efa_admin_get_stats_scope {
@@ -570,6 +571,16 @@ struct efa_admin_rdma_read_stats {
u64 read_resp_bytes;
};
+struct efa_admin_rdma_write_stats {
+ u64 write_wrs;
+
+ u64 write_bytes;
+
+ u64 write_wr_err;
+
+ u64 write_recv_bytes;
+};
+
struct efa_admin_acq_get_stats_resp {
struct efa_admin_acq_common_desc acq_common_desc;
@@ -579,6 +590,8 @@ struct efa_admin_acq_get_stats_resp {
struct efa_admin_messages_stats messages_stats;
struct efa_admin_rdma_read_stats rdma_read_stats;
+
+ struct efa_admin_rdma_write_stats rdma_write_stats;
} u;
};
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index 8f8885e002ba..576811885d59 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -794,6 +794,12 @@ int efa_com_get_stats(struct efa_com_dev *edev,
result->rdma_read_stats.read_wr_err = resp.u.rdma_read_stats.read_wr_err;
result->rdma_read_stats.read_resp_bytes = resp.u.rdma_read_stats.read_resp_bytes;
break;
+ case EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE:
+ result->rdma_write_stats.write_wrs = resp.u.rdma_write_stats.write_wrs;
+ result->rdma_write_stats.write_bytes = resp.u.rdma_write_stats.write_bytes;
+ result->rdma_write_stats.write_wr_err = resp.u.rdma_write_stats.write_wr_err;
+ result->rdma_write_stats.write_recv_bytes = resp.u.rdma_write_stats.write_recv_bytes;
+ break;
}
return 0;
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 0898ad5bc340..fc97f37bb39b 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -262,10 +262,18 @@ struct efa_com_rdma_read_stats {
u64 read_resp_bytes;
};
+struct efa_com_rdma_write_stats {
+ u64 write_wrs;
+ u64 write_bytes;
+ u64 write_wr_err;
+ u64 write_recv_bytes;
+};
+
union efa_com_get_stats_result {
struct efa_com_basic_stats basic_stats;
struct efa_com_messages_stats messages_stats;
struct efa_com_rdma_read_stats rdma_read_stats;
+ struct efa_com_rdma_write_stats rdma_write_stats;
};
int efa_com_create_qp(struct efa_com_dev *edev,
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 2a195c4b0f17..0f8ca99d0827 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -61,6 +61,10 @@ struct efa_user_mmap_entry {
op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
+ op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \
+ op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
+ op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
+ op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, nam) \
@@ -449,12 +453,12 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
- efa_qp_user_mmap_entries_remove(qp);
-
err = efa_destroy_qp_handle(dev, qp->qp_handle);
if (err)
return err;
+ efa_qp_user_mmap_entries_remove(qp);
+
if (qp->rq_cpu_addr) {
ibdev_dbg(&dev->ibdev,
"qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
@@ -1013,8 +1017,8 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
"Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
- efa_cq_user_mmap_entries_remove(cq);
efa_destroy_cq_idx(dev, cq->cq_idx);
+ efa_cq_user_mmap_entries_remove(cq);
if (cq->eq) {
xa_erase(&dev->cqs_xa, cq->cq_idx);
synchronize_irq(cq->eq->irq.irqn);
@@ -2080,6 +2084,7 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
{
struct efa_com_get_stats_params params = {};
union efa_com_get_stats_result result;
+ struct efa_com_rdma_write_stats *rws;
struct efa_com_rdma_read_stats *rrs;
struct efa_com_messages_stats *ms;
struct efa_com_basic_stats *bs;
@@ -2121,6 +2126,19 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
+ if (EFA_DEV_CAP(dev, RDMA_WRITE)) {
+ params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ rws = &result.rdma_write_stats;
+ stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs;
+ stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes;
+ stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err;
+ stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
+ }
+
return ARRAY_SIZE(efa_port_stats_descs);
}
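The four new op(...) lines feed the driver's existing stats X-macro, which expands the same list once into the counter enum and once into the descriptor strings, so the enum order and the name table cannot drift apart. The generic idiom looks like this (all names below are invented for the example):

	#define EX_STATS(op)			\
		op(EX_TX_PKTS, "tx_pkts")	\
		op(EX_RX_PKTS, "rx_pkts")

	#define EX_ENUM(ename, name)	ename,
	#define EX_STR(ename, name)	[ename] = name,

	enum ex_stat { EX_STATS(EX_ENUM) EX_NR_STATS };

	static const char * const ex_stat_names[] = {
		EX_STATS(EX_STR)
	};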
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index a882b57aa118..9d316fdc6f9a 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -228,7 +228,7 @@ struct erdma_cmdq_ext_db_req {
/* create_cq cfg1 */
#define ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK GENMASK(31, 16)
-#define ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK BIT(15)
+#define ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK BIT(15)
#define ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK BIT(11)
#define ERDMA_CMD_CREATE_CQ_EQN_MASK GENMASK(9, 0)
@@ -248,6 +248,7 @@ struct erdma_cmdq_create_cq_req {
/* regmr/deregmr cfg0 */
#define ERDMA_CMD_MR_VALID_MASK BIT(31)
+#define ERDMA_CMD_MR_VERSION_MASK GENMASK(30, 28)
#define ERDMA_CMD_MR_KEY_MASK GENMASK(27, 20)
#define ERDMA_CMD_MR_MPT_IDX_MASK GENMASK(19, 0)
@@ -258,7 +259,8 @@ struct erdma_cmdq_create_cq_req {
/* regmr cfg2 */
#define ERDMA_CMD_REGMR_PAGESIZE_MASK GENMASK(31, 27)
-#define ERDMA_CMD_REGMR_MTT_TYPE_MASK GENMASK(21, 20)
+#define ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK GENMASK(26, 24)
+#define ERDMA_CMD_REGMR_MTT_LEVEL_MASK GENMASK(21, 20)
#define ERDMA_CMD_REGMR_MTT_CNT_MASK GENMASK(19, 0)
struct erdma_cmdq_reg_mr_req {
@@ -268,7 +270,14 @@ struct erdma_cmdq_reg_mr_req {
u64 start_va;
u32 size;
u32 cfg2;
- u64 phy_addr[4];
+ union {
+ u64 phy_addr[4];
+ struct {
+ u64 rsvd;
+ u32 size_h;
+ u32 mtt_cnt_h;
+ };
+ };
};
struct erdma_cmdq_dereg_mr_req {
@@ -309,7 +318,7 @@ struct erdma_cmdq_modify_qp_req {
/* create qp mtt_cfg */
#define ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK GENMASK(31, 12)
#define ERDMA_CMD_CREATE_QP_MTT_CNT_MASK GENMASK(11, 1)
-#define ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK BIT(0)
+#define ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK BIT(0)
/* create qp db cfg */
#define ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK GENMASK(31, 16)
@@ -364,6 +373,7 @@ struct erdma_cmdq_reflush_req {
enum {
ERDMA_DEV_CAP_FLAGS_ATOMIC = 1 << 7,
+ ERDMA_DEV_CAP_FLAGS_MTT_VA = 1 << 5,
ERDMA_DEV_CAP_FLAGS_EXTEND_DB = 1 << 3,
};
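These GENMASK()/BIT() field defines are consumed with FIELD_PREP() when the driver composes the command words (see regmr_cmd() and create_cq_cmd() below). A stand-alone sketch of that idiom, with hypothetical mask names:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define EX_CFG_PAGESIZE_MASK	GENMASK(31, 27)
	#define EX_CFG_MTT_LEVEL_MASK	GENMASK(21, 20)
	#define EX_CFG_MTT_CNT_MASK	GENMASK(19, 0)

	static u32 ex_build_cfg(u32 page_shift, u32 level, u32 cnt)
	{
		/* FIELD_PREP() shifts each value into its mask position. */
		return FIELD_PREP(EX_CFG_PAGESIZE_MASK, page_shift) |
		       FIELD_PREP(EX_CFG_MTT_LEVEL_MASK, level) |
		       FIELD_PREP(EX_CFG_MTT_CNT_MASK, cnt);
	}

	/* FIELD_GET(EX_CFG_MTT_LEVEL_MASK, cfg) recovers the level again. */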
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 44923c51a01b..6d0330badd68 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -410,7 +410,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
/* Copy SGLs to SQE content to accelerate */
memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
qp->attrs.sq_size, SQEBB_SHIFT),
- mr->mem.mtt_buf, MTT_SIZE(mr->mem.mtt_nents));
+ mr->mem.mtt->buf, MTT_SIZE(mr->mem.mtt_nents));
wqe_size = sizeof(struct erdma_reg_mr_sqe) +
MTT_SIZE(mr->mem.mtt_nents);
} else {
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 517676fbb8b1..c317947563fb 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -19,6 +19,23 @@
#include "erdma_cm.h"
#include "erdma_verbs.h"
+static void assemble_qbuf_mtt_for_cmd(struct erdma_mem *mem, u32 *cfg,
+ u64 *addr0, u64 *addr1)
+{
+ struct erdma_mtt *mtt = mem->mtt;
+
+ if (mem->mtt_nents > ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ *addr0 = mtt->buf_dma;
+ *cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_1LEVEL);
+ } else {
+ *addr0 = mtt->buf[0];
+ memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));
+ *cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
+ }
+}
+
static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
{
struct erdma_dev *dev = to_edev(qp->ibqp.device);
@@ -53,8 +70,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
req.sq_mtt_cfg =
FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
- FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
- ERDMA_MR_INLINE_MTT);
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
req.rq_mtt_cfg = req.sq_mtt_cfg;
req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
@@ -67,30 +84,28 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
user_qp = &qp->user_qp;
req.sq_cqn_mtt_cfg = FIELD_PREP(
ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
- ilog2(user_qp->sq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+ ilog2(user_qp->sq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
req.sq_cqn_mtt_cfg |=
FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
req.rq_cqn_mtt_cfg = FIELD_PREP(
ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
- ilog2(user_qp->rq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+ ilog2(user_qp->rq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
req.rq_cqn_mtt_cfg |=
FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
- req.sq_mtt_cfg = user_qp->sq_mtt.page_offset;
+ req.sq_mtt_cfg = user_qp->sq_mem.page_offset;
req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
- user_qp->sq_mtt.mtt_nents) |
- FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
- user_qp->sq_mtt.mtt_type);
+ user_qp->sq_mem.mtt_nents);
- req.rq_mtt_cfg = user_qp->rq_mtt.page_offset;
+ req.rq_mtt_cfg = user_qp->rq_mem.page_offset;
req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
- user_qp->rq_mtt.mtt_nents) |
- FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
- user_qp->rq_mtt.mtt_type);
+ user_qp->rq_mem.mtt_nents);
- req.sq_buf_addr = user_qp->sq_mtt.mtt_entry[0];
- req.rq_buf_addr = user_qp->rq_mtt.mtt_entry[0];
+ assemble_qbuf_mtt_for_cmd(&user_qp->sq_mem, &req.sq_mtt_cfg,
+ &req.sq_buf_addr, req.sq_mtt_entry);
+ assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
+ &req.rq_buf_addr, req.rq_mtt_entry);
req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
@@ -117,13 +132,26 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
{
- struct erdma_cmdq_reg_mr_req req;
struct erdma_pd *pd = to_epd(mr->ibmr.pd);
- u64 *phy_addr;
- int i;
+ u32 mtt_level = ERDMA_MR_MTT_0LEVEL;
+ struct erdma_cmdq_reg_mr_req req;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);
+ if (mr->type == ERDMA_MR_TYPE_FRMR ||
+ mr->mem.page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ if (mr->mem.mtt->continuous) {
+ req.phy_addr[0] = mr->mem.mtt->buf_dma;
+ mtt_level = ERDMA_MR_MTT_1LEVEL;
+ } else {
+ req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
+ mtt_level = mr->mem.mtt->level;
+ }
+ } else if (mr->type != ERDMA_MR_TYPE_DMA) {
+ memcpy(req.phy_addr, mr->mem.mtt->buf,
+ MTT_SIZE(mr->mem.page_cnt));
+ }
+
req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
@@ -132,7 +160,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access);
req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
ilog2(mr->mem.page_size)) |
- FIELD_PREP(ERDMA_CMD_REGMR_MTT_TYPE_MASK, mr->mem.mtt_type) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_LEVEL_MASK, mtt_level) |
FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);
if (mr->type == ERDMA_MR_TYPE_DMA)
@@ -143,14 +171,12 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
req.size = mr->mem.len;
}
- if (mr->type == ERDMA_MR_TYPE_FRMR ||
- mr->mem.mtt_type == ERDMA_MR_INDIRECT_MTT) {
- phy_addr = req.phy_addr;
- *phy_addr = mr->mem.mtt_entry[0];
- } else {
- phy_addr = req.phy_addr;
- for (i = 0; i < mr->mem.mtt_nents; i++)
- *phy_addr++ = mr->mem.mtt_entry[i];
+ if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) {
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_MR_VERSION_MASK, 1);
+ req.cfg2 |= FIELD_PREP(ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK,
+ PAGE_SHIFT - ERDMA_HW_PAGE_SHIFT);
+ req.size_h = upper_32_bits(mr->mem.len);
+ req.mtt_cnt_h = mr->mem.page_cnt >> 20;
}
post_cmd:
@@ -161,7 +187,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
{
struct erdma_dev *dev = to_edev(cq->ibcq.device);
struct erdma_cmdq_create_cq_req req;
- struct erdma_mem *mtt;
+ struct erdma_mem *mem;
u32 page_size;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
@@ -179,30 +205,34 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
- FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
- ERDMA_MR_INLINE_MTT);
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
req.first_page_offset = 0;
req.cq_db_info_addr =
cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
} else {
- mtt = &cq->user_cq.qbuf_mtt;
+ mem = &cq->user_cq.qbuf_mem;
req.cfg0 |=
FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
- ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT);
- if (mtt->mtt_nents == 1) {
- req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
- req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
+ ilog2(mem->page_size) - ERDMA_HW_PAGE_SHIFT);
+ if (mem->mtt_nents == 1) {
+ req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]);
+ req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]);
+ req.cfg1 |=
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
} else {
- req.qbuf_addr_l = lower_32_bits(mtt->mtt_entry[0]);
- req.qbuf_addr_h = upper_32_bits(mtt->mtt_entry[0]);
+ req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma);
+ req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma);
+ req.cfg1 |=
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_1LEVEL);
}
req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
- mtt->mtt_nents);
- req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
- mtt->mtt_type);
+ mem->mtt_nents);
- req.first_page_offset = mtt->page_offset;
+ req.first_page_offset = mem->page_offset;
req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
if (uctx->ext_db.enable) {
@@ -481,8 +511,8 @@ static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;
- kqp->swr_tbl = vmalloc(qp->attrs.sq_size * sizeof(u64));
- kqp->rwr_tbl = vmalloc(qp->attrs.rq_size * sizeof(u64));
+ kqp->swr_tbl = vmalloc_array(qp->attrs.sq_size, sizeof(u64));
+ kqp->rwr_tbl = vmalloc_array(qp->attrs.rq_size, sizeof(u64));
if (!kqp->swr_tbl || !kqp->rwr_tbl)
goto err_out;
@@ -508,12 +538,223 @@ err_out:
return -ENOMEM;
}
+static void erdma_fill_bottom_mtt(struct erdma_dev *dev, struct erdma_mem *mem)
+{
+ struct erdma_mtt *mtt = mem->mtt;
+ struct ib_block_iter biter;
+ u32 idx = 0;
+
+ while (mtt->low_level)
+ mtt = mtt->low_level;
+
+ rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
+ mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
+}
+
+static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
+ size_t size)
+{
+ struct erdma_mtt *mtt;
+
+ mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
+ if (!mtt)
+ return ERR_PTR(-ENOMEM);
+
+ mtt->size = size;
+ mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
+ if (!mtt->buf)
+ goto err_free_mtt;
+
+ mtt->continuous = true;
+ mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
+ goto err_free_mtt_buf;
+
+ return mtt;
+
+err_free_mtt_buf:
+ kfree(mtt->buf);
+
+err_free_mtt:
+ kfree(mtt);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE);
+ vfree(mtt->sglist);
+}
+
+static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ erdma_destroy_mtt_buf_sg(dev, mtt);
+ vfree(mtt->buf);
+ kfree(mtt);
+}
+
+static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
+ struct erdma_mtt *low_mtt)
+{
+ struct scatterlist *sg;
+ u32 idx = 0, i;
+
+ for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i)
+ mtt->buf[idx++] = sg_dma_address(sg);
+}
+
+static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
+{
+ struct scatterlist *sglist;
+ void *buf = mtt->buf;
+ u32 npages, i, nsg;
+ struct page *pg;
+
+ /* Fail if buf is not page aligned */
+ if ((uintptr_t)buf & ~PAGE_MASK)
+ return -EINVAL;
+
+ npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
+ sglist = vzalloc(npages * sizeof(*sglist));
+ if (!sglist)
+ return -ENOMEM;
+
+ sg_init_table(sglist, npages);
+ for (i = 0; i < npages; i++) {
+ pg = vmalloc_to_page(buf);
+ if (!pg)
+ goto err;
+ sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
+ buf += PAGE_SIZE;
+ }
+
+ nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE);
+ if (!nsg)
+ goto err;
+
+ mtt->sglist = sglist;
+ mtt->nsg = nsg;
+
+ return 0;
+err:
+ vfree(sglist);
+
+ return -ENOMEM;
+}
+
+static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
+ size_t size)
+{
+ struct erdma_mtt *mtt;
+ int ret = -ENOMEM;
+
+ mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
+ if (!mtt)
+ return ERR_PTR(-ENOMEM);
+
+ mtt->size = ALIGN(size, PAGE_SIZE);
+ mtt->buf = vzalloc(mtt->size);
+ mtt->continuous = false;
+ if (!mtt->buf)
+ goto err_free_mtt;
+
+ ret = erdma_create_mtt_buf_sg(dev, mtt);
+ if (ret)
+ goto err_free_mtt_buf;
+
+ ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
+ mtt->size, mtt->nsg);
+
+ return mtt;
+
+err_free_mtt_buf:
+ vfree(mtt->buf);
+
+err_free_mtt:
+ kfree(mtt);
+
+ return ERR_PTR(ret);
+}
+
+static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
+ bool force_continuous)
+{
+ struct erdma_mtt *mtt, *tmp_mtt;
+ int ret, level = 0;
+
+ ibdev_dbg(&dev->ibdev, "create_mtt, size:%lu, force cont:%d\n", size,
+ force_continuous);
+
+ if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_MTT_VA))
+ force_continuous = true;
+
+ if (force_continuous)
+ return erdma_create_cont_mtt(dev, size);
+
+ mtt = erdma_create_scatter_mtt(dev, size);
+ if (IS_ERR(mtt))
+ return mtt;
+ level = 1;
+
+ /* Converge the mtt table. */
+ while (mtt->nsg != 1 && level <= 3) {
+ tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
+ if (IS_ERR(tmp_mtt)) {
+ ret = PTR_ERR(tmp_mtt);
+ goto err_free_mtt;
+ }
+ erdma_init_middle_mtt(tmp_mtt, mtt);
+ tmp_mtt->low_level = mtt;
+ mtt = tmp_mtt;
+ level++;
+ }
+
+ if (level > 3) {
+ ret = -ENOMEM;
+ goto err_free_mtt;
+ }
+
+ mtt->level = level;
+ ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
+ mtt->level, mtt->sglist[0].dma_address);
+
+ return mtt;
+err_free_mtt:
+ while (mtt) {
+ tmp_mtt = mtt->low_level;
+ erdma_destroy_scatter_mtt(dev, mtt);
+ mtt = tmp_mtt;
+ }
+
+ return ERR_PTR(ret);
+}
+
+static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
+{
+ struct erdma_mtt *tmp_mtt;
+
+ if (mtt->continuous) {
+ dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
+ DMA_TO_DEVICE);
+ kfree(mtt->buf);
+ kfree(mtt);
+ } else {
+ while (mtt) {
+ tmp_mtt = mtt->low_level;
+ erdma_destroy_scatter_mtt(dev, mtt);
+ mtt = tmp_mtt;
+ }
+ }
+}
+
static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
u64 start, u64 len, int access, u64 virt,
- unsigned long req_page_size, u8 force_indirect_mtt)
+ unsigned long req_page_size, bool force_continuous)
{
- struct ib_block_iter biter;
- uint64_t *phy_addr = NULL;
int ret = 0;
mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
@@ -529,38 +770,14 @@ static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
mem->page_offset = start & (mem->page_size - 1);
mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
mem->page_cnt = mem->mtt_nents;
-
- if (mem->page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES ||
- force_indirect_mtt) {
- mem->mtt_type = ERDMA_MR_INDIRECT_MTT;
- mem->mtt_buf =
- alloc_pages_exact(MTT_SIZE(mem->page_cnt), GFP_KERNEL);
- if (!mem->mtt_buf) {
- ret = -ENOMEM;
- goto error_ret;
- }
- phy_addr = mem->mtt_buf;
- } else {
- mem->mtt_type = ERDMA_MR_INLINE_MTT;
- phy_addr = mem->mtt_entry;
+ mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt),
+ force_continuous);
+ if (IS_ERR(mem->mtt)) {
+ ret = PTR_ERR(mem->mtt);
+ goto error_ret;
}
- rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size) {
- *phy_addr = rdma_block_iter_dma_address(&biter);
- phy_addr++;
- }
-
- if (mem->mtt_type == ERDMA_MR_INDIRECT_MTT) {
- mem->mtt_entry[0] =
- dma_map_single(&dev->pdev->dev, mem->mtt_buf,
- MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
- if (dma_mapping_error(&dev->pdev->dev, mem->mtt_entry[0])) {
- free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
- mem->mtt_buf = NULL;
- ret = -ENOMEM;
- goto error_ret;
- }
- }
+ erdma_fill_bottom_mtt(dev, mem);
return 0;
@@ -575,11 +792,8 @@ error_ret:
static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
{
- if (mem->mtt_buf) {
- dma_unmap_single(&dev->pdev->dev, mem->mtt_entry[0],
- MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
- free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
- }
+ if (mem->mtt)
+ erdma_destroy_mtt(dev, mem->mtt);
if (mem->umem) {
ib_umem_release(mem->umem);
@@ -660,18 +874,18 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
qp->attrs.rq_size * RQE_SIZE))
return -EINVAL;
- ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mtt, va,
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mem, va,
qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
- (SZ_1M - SZ_4K), 1);
+ (SZ_1M - SZ_4K), true);
if (ret)
return ret;
rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
qp->user_qp.rq_offset = rq_offset;
- ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mem, va + rq_offset,
qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
- (SZ_1M - SZ_4K), 1);
+ (SZ_1M - SZ_4K), true);
if (ret)
goto put_sq_mtt;
@@ -687,18 +901,18 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
return 0;
put_rq_mtt:
- put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
put_sq_mtt:
- put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
return ret;
}
static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
{
- put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
- put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
}
@@ -875,33 +1089,20 @@ struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
mr->mem.page_size = PAGE_SIZE; /* update it later. */
mr->mem.page_cnt = max_num_sg;
- mr->mem.mtt_type = ERDMA_MR_INDIRECT_MTT;
- mr->mem.mtt_buf =
- alloc_pages_exact(MTT_SIZE(mr->mem.page_cnt), GFP_KERNEL);
- if (!mr->mem.mtt_buf) {
- ret = -ENOMEM;
+ mr->mem.mtt = erdma_create_mtt(dev, MTT_SIZE(max_num_sg), true);
+ if (IS_ERR(mr->mem.mtt)) {
+ ret = PTR_ERR(mr->mem.mtt);
goto out_remove_stag;
}
- mr->mem.mtt_entry[0] =
- dma_map_single(&dev->pdev->dev, mr->mem.mtt_buf,
- MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
- if (dma_mapping_error(&dev->pdev->dev, mr->mem.mtt_entry[0])) {
- ret = -ENOMEM;
- goto out_free_mtt;
- }
-
ret = regmr_cmd(dev, mr);
if (ret)
- goto out_dma_unmap;
+ goto out_destroy_mtt;
return &mr->ibmr;
-out_dma_unmap:
- dma_unmap_single(&dev->pdev->dev, mr->mem.mtt_entry[0],
- MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
-out_free_mtt:
- free_pages_exact(mr->mem.mtt_buf, MTT_SIZE(mr->mem.page_cnt));
+out_destroy_mtt:
+ erdma_destroy_mtt(dev, mr->mem.mtt);
out_remove_stag:
erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
@@ -920,7 +1121,7 @@ static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
if (mr->mem.mtt_nents >= mr->mem.page_cnt)
return -1;
- *((u64 *)mr->mem.mtt_buf + mr->mem.mtt_nents) = addr;
+ mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
mr->mem.mtt_nents++;
return 0;
@@ -956,7 +1157,7 @@ struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
return ERR_PTR(-ENOMEM);
ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
- SZ_2G - SZ_4K, 0);
+ SZ_2G - SZ_4K, false);
if (ret)
goto err_out_free;
@@ -1041,7 +1242,7 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
} else {
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
- put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
}
xa_erase(&dev->cq_xa, cq->cqn);
@@ -1089,8 +1290,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
} else {
- put_mtt_entries(dev, &qp->user_qp.sq_mtt);
- put_mtt_entries(dev, &qp->user_qp.rq_mtt);
+ put_mtt_entries(dev, &qp->user_qp.sq_mem);
+ put_mtt_entries(dev, &qp->user_qp.rq_mem);
erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
}
@@ -1379,9 +1580,9 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
int ret;
struct erdma_dev *dev = to_edev(cq->ibcq.device);
- ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mtt, ureq->qbuf_va,
+ ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va,
ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
- 1);
+ true);
if (ret)
return ret;
@@ -1389,7 +1590,7 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
&cq->user_cq.user_dbr_page,
&cq->user_cq.db_info_dma_addr);
if (ret)
- put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
return ret;
}
@@ -1473,7 +1674,7 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err_free_res:
if (!rdma_is_kernel_res(&ibcq->res)) {
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
- put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
} else {
dma_free_coherent(&dev->pdev->dev,
WARPPED_BUFSIZE(depth << CQE_SHIFT),
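erdma_create_mtt() above collapses a scattered MTT level by level: every level stores 8-byte entries, so each 4 KiB page of MTT data summarizes 512 entries of the level beneath it, and the loop stops once a level fits in a single scatterlist entry. A worked example with assumed numbers (4 KiB pages, 1 GiB of user memory):

	#include <linux/kernel.h>
	#include <linux/sizes.h>

	static unsigned int example_mtt_levels(void)
	{
		size_t entries = SZ_1G / SZ_4K;	/* 262144 bottom-level entries */
		unsigned int level = 1;		/* the scatter MTT itself      */

		/* Each higher level needs one 8-byte entry per page of the
		 * level below; stop when a level fits in one page.
		 */
		while (DIV_ROUND_UP(entries * 8, SZ_4K) > 1) {
			entries = DIV_ROUND_UP(entries * 8, SZ_4K);
			level++;
		}
		return level;	/* 2 for this example; the driver caps at 3 */
	}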
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index 429fc3063f98..eb9c0f92fb6f 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -65,7 +65,7 @@ struct erdma_pd {
* MemoryRegion definition.
*/
#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
-#define MTT_SIZE(mtt_cnt) (mtt_cnt << 3) /* per mtt takes 8 Bytes. */
+#define MTT_SIZE(mtt_cnt) ((mtt_cnt) << 3) /* per mtt entry takes 8 Bytes. */
#define ERDMA_MR_MAX_MTT_CNT 524288
#define ERDMA_MTT_ENTRY_SIZE 8
@@ -73,8 +73,8 @@ struct erdma_pd {
#define ERDMA_MR_TYPE_FRMR 1
#define ERDMA_MR_TYPE_DMA 2
-#define ERDMA_MR_INLINE_MTT 0
-#define ERDMA_MR_INDIRECT_MTT 1
+#define ERDMA_MR_MTT_0LEVEL 0
+#define ERDMA_MR_MTT_1LEVEL 1
#define ERDMA_MR_ACC_RA BIT(0)
#define ERDMA_MR_ACC_LR BIT(1)
@@ -90,10 +90,28 @@ static inline u8 to_erdma_access_flags(int access)
(access & IB_ACCESS_REMOTE_ATOMIC ? ERDMA_MR_ACC_RA : 0);
}
+/* Hierarchical storage structure for MTT entries */
+struct erdma_mtt {
+ u64 *buf;
+ size_t size;
+
+ bool continuous;
+ union {
+ dma_addr_t buf_dma;
+ struct {
+ struct scatterlist *sglist;
+ u32 nsg;
+ u32 level;
+ };
+ };
+
+ struct erdma_mtt *low_level;
+};
+
struct erdma_mem {
struct ib_umem *umem;
- void *mtt_buf;
- u32 mtt_type;
+ struct erdma_mtt *mtt;
+
u32 page_size;
u32 page_offset;
u32 page_cnt;
@@ -101,8 +119,6 @@ struct erdma_mem {
u64 va;
u64 len;
-
- u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
};
struct erdma_mr {
@@ -121,8 +137,8 @@ struct erdma_user_dbrecords_page {
};
struct erdma_uqp {
- struct erdma_mem sq_mtt;
- struct erdma_mem rq_mtt;
+ struct erdma_mem sq_mem;
+ struct erdma_mem rq_mem;
dma_addr_t sq_db_info_dma_addr;
dma_addr_t rq_db_info_dma_addr;
@@ -234,7 +250,7 @@ struct erdma_kcq_info {
};
struct erdma_ucq_info {
- struct erdma_mem qbuf_mtt;
+ struct erdma_mem qbuf_mem;
struct erdma_user_dbrecords_page *user_dbr_page;
dma_addr_t db_info_dma_addr;
};
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 2e89ec10efed..5d977f363684 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -31,6 +31,7 @@ hfi1-y := \
netdev_rx.o \
opfn.o \
pcie.o \
+ pin_system.o \
pio.o \
pio_copy.o \
platform.o \
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 77ee77d4000f..bbc957c578e1 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -230,11 +230,9 @@ static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
- struct list_head *pos;
struct hfi1_affinity_node *entry;
- list_for_each(pos, &node_affinity.list) {
- entry = list_entry(pos, struct hfi1_affinity_node, list);
+ list_for_each_entry(entry, &node_affinity.list, list) {
if (entry->node == node)
return entry;
}
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index baaa4406d5e6..0814291a0412 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1461,7 +1461,8 @@ static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
ret = write_lcb_csr(dd, csr, data);
if (ret) {
- dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
+ if (!(dd->flags & HFI1_SHUTDOWN))
+ dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
return 0;
}
@@ -6160,7 +6161,7 @@ static int request_host_lcb_access(struct hfi1_devdata *dd)
ret = do_8051_command(dd, HCMD_MISC,
(u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
LOAD_DATA_FIELD_ID_SHIFT, NULL);
- if (ret != HCMD_SUCCESS) {
+ if (ret != HCMD_SUCCESS && !(dd->flags & HFI1_SHUTDOWN)) {
dd_dev_err(dd, "%s: command failed with error %d\n",
__func__, ret);
}
@@ -6241,7 +6242,8 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
if (dd->lcb_access_count == 0) {
ret = request_host_lcb_access(dd);
if (ret) {
- dd_dev_err(dd,
+ if (!(dd->flags & HFI1_SHUTDOWN))
+ dd_dev_err(dd,
"%s: unable to acquire LCB access, err %d\n",
__func__, ret);
goto done;
diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index 05be0d119f79..b0a00b7aaec5 100644
--- a/drivers/infiniband/hw/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -10,8 +10,29 @@
#include "hfi.h"
#include "device.h"
-static struct class *class;
-static struct class *user_class;
+static char *hfi1_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0600;
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+static const struct class class = {
+ .name = "hfi1",
+ .devnode = hfi1_devnode,
+};
+
+static char *hfi1_user_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0666;
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+static const struct class user_class = {
+ .name = "hfi1_user",
+ .devnode = hfi1_user_devnode,
+};
static dev_t hfi1_dev;
int hfi1_cdev_init(int minor, const char *name,
@@ -37,9 +58,9 @@ int hfi1_cdev_init(int minor, const char *name,
}
if (user_accessible)
- device = device_create(user_class, NULL, dev, NULL, "%s", name);
+ device = device_create(&user_class, NULL, dev, NULL, "%s", name);
else
- device = device_create(class, NULL, dev, NULL, "%s", name);
+ device = device_create(&class, NULL, dev, NULL, "%s", name);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
@@ -72,26 +93,6 @@ const char *class_name(void)
return hfi1_class_name;
}
-static char *hfi1_devnode(const struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0600;
- return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
-}
-
-static const char *hfi1_class_name_user = "hfi1_user";
-static const char *class_name_user(void)
-{
- return hfi1_class_name_user;
-}
-
-static char *hfi1_user_devnode(const struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0666;
- return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
-}
-
int __init dev_init(void)
{
int ret;
@@ -102,27 +103,21 @@ int __init dev_init(void)
goto done;
}
- class = class_create(class_name());
- if (IS_ERR(class)) {
- ret = PTR_ERR(class);
+ ret = class_register(&class);
+ if (ret) {
pr_err("Could not create device class (err %d)\n", -ret);
unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
goto done;
}
- class->devnode = hfi1_devnode;
- user_class = class_create(class_name_user());
- if (IS_ERR(user_class)) {
- ret = PTR_ERR(user_class);
+ ret = class_register(&user_class);
+ if (ret) {
pr_err("Could not create device class for user accessible files (err %d)\n",
-ret);
- class_destroy(class);
- class = NULL;
- user_class = NULL;
+ class_unregister(&class);
unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
goto done;
}
- user_class->devnode = hfi1_user_devnode;
done:
return ret;
@@ -130,11 +125,8 @@ done:
void dev_cleanup(void)
{
- class_destroy(class);
- class = NULL;
-
- class_destroy(user_class);
- user_class = NULL;
+ class_unregister(&class);
+ class_unregister(&user_class);
unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
}
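The conversion above follows the generic "statically defined struct class" pattern: the class and its ->devnode() callback are declared const at file scope and registered with class_register()/class_unregister() instead of being allocated by class_create(). A minimal stand-alone sketch using the same kernel calls (the "example" names are made up):

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/module.h>

	static char *example_devnode(const struct device *dev, umode_t *mode)
	{
		if (mode)
			*mode = 0600;
		return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
	}

	static const struct class example_class = {
		.name    = "example",
		.devnode = example_devnode,
	};

	static int __init example_init(void)
	{
		return class_register(&example_class);
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		class_unregister(&example_class);
	}
	module_exit(example_exit);

	MODULE_LICENSE("GPL");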
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 7fa9cd39254f..38772e52d7ed 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
- * Copyright(c) 2020 Cornelis Networks, Inc.
+ * Copyright(c) 2020-2023 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.
*/
@@ -1378,8 +1378,6 @@ struct hfi1_devdata {
#define PT_INVALID 3
struct tid_rb_node;
-struct mmu_rb_node;
-struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
diff --git a/drivers/infiniband/hw/hfi1/pin_system.c b/drivers/infiniband/hw/hfi1/pin_system.c
new file mode 100644
index 000000000000..384f722093e0
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pin_system.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
+/*
+ * Copyright(c) 2023 - Cornelis Networks, Inc.
+ */
+
+#include <linux/types.h>
+
+#include "hfi.h"
+#include "common.h"
+#include "device.h"
+#include "pinning.h"
+#include "mmu_rb.h"
+#include "user_sdma.h"
+#include "trace.h"
+
+struct sdma_mmu_node {
+ struct mmu_rb_node rb;
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct page **pages;
+ unsigned int npages;
+};
+
+static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len);
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode, void *arg2,
+ bool *stop);
+static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
+
+static struct mmu_rb_ops sdma_rb_ops = {
+ .filter = sdma_rb_filter,
+ .evict = sdma_rb_evict,
+ .remove = sdma_rb_remove,
+};
+
+int hfi1_init_system_pinning(struct hfi1_user_sdma_pkt_q *pq)
+{
+ struct hfi1_devdata *dd = pq->dd;
+ int ret;
+
+ ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
+ &pq->handler);
+ if (ret)
+ dd_dev_err(dd,
+ "[%u:%u] Failed to register system memory DMA support with MMU: %d\n",
+ pq->ctxt, pq->subctxt, ret);
+ return ret;
+}
+
+void hfi1_free_system_pinning(struct hfi1_user_sdma_pkt_q *pq)
+{
+ if (pq->handler)
+ hfi1_mmu_rb_unregister(pq->handler);
+}
+
+static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
+{
+ struct evict_data evict_data;
+
+ evict_data.cleared = 0;
+ evict_data.target = npages;
+ hfi1_mmu_rb_evict(pq->handler, &evict_data);
+ return evict_data.cleared;
+}
+
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+ unsigned int start, unsigned int npages)
+{
+ hfi1_release_user_pages(mm, pages + start, npages, false);
+ kfree(pages);
+}
+
+static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
+{
+ return node->rb.handler->mn.mm;
+}
+
+static void free_system_node(struct sdma_mmu_node *node)
+{
+ if (node->npages) {
+ unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+ node->npages);
+ atomic_sub(node->npages, &node->pq->n_locked);
+ }
+ kfree(node);
+}
+
+/*
+ * kref_get()'s an additional kref on the returned rb_node to prevent rb_node
+ * from being released until after rb_node is assigned to an SDMA descriptor
+ * (struct sdma_desc) under add_system_iovec_to_sdma_packet(), even if the
+ * virtual address range for rb_node is invalidated between now and then.
+ */
+static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mmu_rb_node *rb_node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
+ if (!rb_node) {
+ spin_unlock_irqrestore(&handler->lock, flags);
+ return NULL;
+ }
+
+ /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
+ kref_get(&rb_node->refcount);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ return container_of(rb_node, struct sdma_mmu_node, rb);
+}
+
+static int pin_system_pages(struct user_sdma_request *req,
+ uintptr_t start_address, size_t length,
+ struct sdma_mmu_node *node, int npages)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ int pinned, cleared;
+ struct page **pages;
+
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+retry:
+ if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
+ npages)) {
+ SDMA_DBG(req, "Evicting: nlocked %u npages %u",
+ atomic_read(&pq->n_locked), npages);
+ cleared = sdma_cache_evict(pq, npages);
+ if (cleared >= npages)
+ goto retry;
+ }
+
+ SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
+ start_address, node->npages, npages);
+ pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
+ pages);
+
+ if (pinned < 0) {
+ kfree(pages);
+ SDMA_DBG(req, "pinned %d", pinned);
+ return pinned;
+ }
+ if (pinned != npages) {
+ unpin_vector_pages(current->mm, pages, node->npages, pinned);
+ SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
+ return -EFAULT;
+ }
+ node->rb.addr = start_address;
+ node->rb.len = length;
+ node->pages = pages;
+ node->npages = npages;
+ atomic_add(pinned, &pq->n_locked);
+ SDMA_DBG(req, "done. pinned %d", pinned);
+ return 0;
+}
+
+/*
+ * kref refcount on *node_p will be 2 on successful addition: one kref from
+ * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being
+ * released until after *node_p is assigned to an SDMA descriptor (struct
+ * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual
+ * address range for *node_p is invalidated between now and then.
+ */
+static int add_system_pinning(struct user_sdma_request *req,
+ struct sdma_mmu_node **node_p,
+ unsigned long start, unsigned long len)
+
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct sdma_mmu_node *node;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ /* First kref "moves" to mmu_rb_handler */
+ kref_init(&node->rb.refcount);
+
+ /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
+ kref_get(&node->rb.refcount);
+
+ node->pq = pq;
+ ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
+ if (ret == 0) {
+ ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
+ if (ret)
+ free_system_node(node);
+ else
+ *node_p = node;
+
+ return ret;
+ }
+
+ kfree(node);
+ return ret;
+}
+
+static int get_system_cache_entry(struct user_sdma_request *req,
+ struct sdma_mmu_node **node_p,
+ size_t req_start, size_t req_len)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
+ u64 end = PFN_ALIGN(req_start + req_len);
+ int ret;
+
+ if ((end - start) == 0) {
+ SDMA_DBG(req,
+ "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
+ req_start, req_len, start, end);
+ return -EINVAL;
+ }
+
+ SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
+
+ while (1) {
+ struct sdma_mmu_node *node =
+ find_system_node(pq->handler, start, end);
+ u64 prepend_len = 0;
+
+ SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
+ if (!node) {
+ ret = add_system_pinning(req, node_p, start,
+ end - start);
+ if (ret == -EEXIST) {
+ /*
+ * Another execution context has inserted a
+ * conflicting entry first.
+ */
+ continue;
+ }
+ return ret;
+ }
+
+ if (node->rb.addr <= start) {
+ /*
+ * This entry covers at least part of the region. If it doesn't extend
+ * to the end, then this will be called again for the next segment.
+ */
+ *node_p = node;
+ return 0;
+ }
+
+ SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d",
+ node->rb.addr, kref_read(&node->rb.refcount));
+ prepend_len = node->rb.addr - start;
+
+ /*
+ * This node will not be returned, instead a new node
+ * will be. So release the reference.
+ */
+ kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+
+ /* Prepend a node to cover the beginning of the allocation */
+ ret = add_system_pinning(req, node_p, start, prepend_len);
+ if (ret == -EEXIST) {
+ /* Another execution context has inserted a conflicting entry first. */
+ continue;
+ }
+ return ret;
+ }
+}
+
+static void sdma_mmu_rb_node_get(void *ctx)
+{
+ struct mmu_rb_node *node = ctx;
+
+ kref_get(&node->refcount);
+}
+
+static void sdma_mmu_rb_node_put(void *ctx)
+{
+ struct sdma_mmu_node *node = ctx;
+
+ kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+}
+
+static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct sdma_mmu_node *cache_entry,
+ size_t start,
+ size_t from_this_cache_entry)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ unsigned int page_offset;
+ unsigned int from_this_page;
+ size_t page_index;
+ void *ctx;
+ int ret;
+
+ /*
+ * Because the cache may be more fragmented than the memory that is being accessed,
+ * it's not strictly necessary to have a descriptor per cache entry.
+ */
+
+ while (from_this_cache_entry) {
+ page_index = PFN_DOWN(start - cache_entry->rb.addr);
+
+ if (page_index >= cache_entry->npages) {
+ SDMA_DBG(req,
+ "Request for page_index %zu >= cache_entry->npages %u",
+ page_index, cache_entry->npages);
+ return -EINVAL;
+ }
+
+ page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
+ from_this_page = PAGE_SIZE - page_offset;
+
+ if (from_this_page < from_this_cache_entry) {
+ ctx = NULL;
+ } else {
+ /*
+ * In the case they are equal the next line has no practical effect,
+ * but it's better to do a register to register copy than a conditional
+ * branch.
+ */
+ from_this_page = from_this_cache_entry;
+ ctx = cache_entry;
+ }
+
+ ret = sdma_txadd_page(pq->dd, &tx->txreq,
+ cache_entry->pages[page_index],
+ page_offset, from_this_page,
+ ctx,
+ sdma_mmu_rb_node_get,
+ sdma_mmu_rb_node_put);
+ if (ret) {
+ /*
+ * When there's a failure, the entire request is freed by
+ * user_sdma_send_pkts().
+ */
+ SDMA_DBG(req,
+ "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
+ ret, page_index, page_offset, from_this_page);
+ return ret;
+ }
+ start += from_this_page;
+ from_this_cache_entry -= from_this_page;
+ }
+ return 0;
+}
+
+static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ size_t from_this_iovec)
+{
+ while (from_this_iovec > 0) {
+ struct sdma_mmu_node *cache_entry;
+ size_t from_this_cache_entry;
+ size_t start;
+ int ret;
+
+ start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
+ ret = get_system_cache_entry(req, &cache_entry, start,
+ from_this_iovec);
+ if (ret) {
+ SDMA_DBG(req, "pin system segment failed %d", ret);
+ return ret;
+ }
+
+ from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
+ if (from_this_cache_entry > from_this_iovec)
+ from_this_cache_entry = from_this_iovec;
+
+ ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
+ from_this_cache_entry);
+
+ /*
+ * Done adding cache_entry to zero or more sdma_desc. Can
+ * kref_put() the "safety" kref taken under
+ * get_system_cache_entry().
+ */
+ kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);
+
+ if (ret) {
+ SDMA_DBG(req, "add system segment failed %d", ret);
+ return ret;
+ }
+
+ iovec->offset += from_this_cache_entry;
+ from_this_iovec -= from_this_cache_entry;
+ }
+
+ return 0;
+}
+
+/*
+ * Add up to pkt_data_remaining bytes to the txreq, starting at the current
+ * offset in the given iovec entry and continuing until all data has been added
+ * to the iovec or the iovec entry type changes.
+ *
+ * On success, prior to returning, adjust pkt_data_remaining, req->iov_idx, and
+ * the offset value in req->iov[req->iov_idx] to reflect the data that has been
+ * consumed.
+ */
+int hfi1_add_pages_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ u32 *pkt_data_remaining)
+{
+ size_t remaining_to_add = *pkt_data_remaining;
+ /*
+ * Walk through iovec entries, ensure the associated pages
+ * are pinned and mapped, add data to the packet until no more
+ * data remains to be added or the iovec entry type changes.
+ */
+ while (remaining_to_add > 0) {
+ struct user_sdma_iovec *cur_iovec;
+ size_t from_this_iovec;
+ int ret;
+
+ cur_iovec = iovec;
+ from_this_iovec = iovec->iov.iov_len - iovec->offset;
+
+ if (from_this_iovec > remaining_to_add) {
+ from_this_iovec = remaining_to_add;
+ } else {
+ /* The current iovec entry will be consumed by this pass. */
+ req->iov_idx++;
+ iovec++;
+ }
+
+ ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
+ from_this_iovec);
+ if (ret)
+ return ret;
+
+ remaining_to_add -= from_this_iovec;
+ }
+ *pkt_data_remaining = remaining_to_add;
+
+ return 0;
+}
+
+static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len)
+{
+ return (bool)(node->addr == addr);
+}
+
+/*
+ * Return 1 to remove the node from the rb tree and call the remove op.
+ *
+ * Called with the rb tree lock held.
+ */
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ void *evict_arg, bool *stop)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+ struct evict_data *evict_data = evict_arg;
+
+ /* this node will be evicted, add its pages to our count */
+ evict_data->cleared += node->npages;
+
+ /* have enough pages been cleared? */
+ if (evict_data->cleared >= evict_data->target)
+ *stop = true;
+
+ return 1; /* remove this node */
+}
+
+static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+
+ free_system_node(node);
+}
diff --git a/drivers/infiniband/hw/hfi1/pinning.h b/drivers/infiniband/hw/hfi1/pinning.h
new file mode 100644
index 000000000000..a814a3aa9654
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pinning.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
+ * Copyright(c) 2023 Cornelis Networks, Inc.
+ */
+#ifndef _HFI1_PINNING_H
+#define _HFI1_PINNING_H
+
+struct hfi1_user_sdma_pkt_q;
+struct user_sdma_request;
+struct user_sdma_txreq;
+struct user_sdma_iovec;
+
+int hfi1_init_system_pinning(struct hfi1_user_sdma_pkt_q *pq);
+void hfi1_free_system_pinning(struct hfi1_user_sdma_pkt_q *pq);
+int hfi1_add_pages_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ u32 *pkt_data_remaining);
+
+#endif /* _HFI1_PINNING_H */
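Call order for this interface, excerpting the calls from the user_sdma.c hunks below: register once per packet queue, feed each transmit request from the current iovec until the packet payload is filled, and tear the cache down with the queue.

	/* at packet-queue allocation */
	ret = hfi1_init_system_pinning(pq);

	/* per packet, while payload bytes remain */
	ret = hfi1_add_pages_to_sdma_packet(req, tx, iovec, &datalen);

	/* at packet-queue teardown */
	hfi1_free_system_pinning(pq);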
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 62e7dc9bea7b..dfea53e0fdeb 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1893,9 +1893,7 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
}
/* build new map */
- newmap = kzalloc(sizeof(*newmap) +
- roundup_pow_of_two(num_vls) *
- sizeof(struct pio_map_elem *),
+ newmap = kzalloc(struct_size(newmap, map, roundup_pow_of_two(num_vls)),
GFP_KERNEL);
if (!newmap)
goto bail;
@@ -1910,9 +1908,8 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
int sz = roundup_pow_of_two(vl_scontexts[i]);
/* only allocate once */
- newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
- sz * sizeof(struct
- send_context *),
+ newmap->map[i] = kzalloc(struct_size(newmap->map[i],
+ ksc, sz),
GFP_KERNEL);
if (!newmap->map[i])
goto bail;
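
The two pio.c hunks above replace open-coded sizeof(*p) + n * sizeof(elem) arithmetic with struct_size(), the kernel helper that sizes a structure ending in a flexible array member and saturates on overflow. A standalone sketch of the same sizing pattern, assuming a simplified overflow check in place of the kernel macro (demo_struct_size() is not the kernel implementation):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct pio_map_elem;			/* opaque for the sketch */

	struct demo_map {
		unsigned int count;
		struct pio_map_elem *map[];	/* flexible array member */
	};

	/* Simplified stand-in for struct_size(): returns SIZE_MAX on overflow. */
	static size_t demo_struct_size(size_t base, size_t elem, size_t n)
	{
		if (n && elem > (SIZE_MAX - base) / n)
			return SIZE_MAX;
		return base + n * elem;
	}

	int main(void)
	{
		size_t n = 8;	/* e.g. roundup_pow_of_two(num_vls) */
		size_t sz = demo_struct_size(sizeof(struct demo_map),
					     sizeof(struct pio_map_elem *), n);
		struct demo_map *m = (sz == SIZE_MAX) ? NULL : calloc(1, sz);

		if (!m)
			return 1;
		m->count = (unsigned int)n;
		printf("allocated %zu bytes for %zu trailing pointers\n", sz, n);
		free(m);
		return 0;
	}
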
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 02bd62b857b7..29ae7beb9b03 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
- * Copyright(c) 2020 - Cornelis Networks, Inc.
+ * Copyright(c) 2020 - 2023 Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
*/
@@ -60,22 +60,6 @@ static int defer_packet_queue(
uint seq,
bool pkts_sent);
static void activate_packet_queue(struct iowait *wait, int reason);
-static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
- unsigned long len);
-static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
- void *arg2, bool *stop);
-static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
-
-static struct mmu_rb_ops sdma_rb_ops = {
- .filter = sdma_rb_filter,
- .evict = sdma_rb_evict,
- .remove = sdma_rb_remove,
-};
-
-static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
- struct user_sdma_txreq *tx,
- struct user_sdma_iovec *iovec,
- u32 *pkt_remaining);
static int defer_packet_queue(
struct sdma_engine *sde,
@@ -185,12 +169,9 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
cq->nentries = hfi1_sdma_comp_ring_size;
- ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
- &pq->handler);
- if (ret) {
- dd_dev_err(dd, "Failed to register with MMU %d", ret);
+ ret = hfi1_init_system_pinning(pq);
+ if (ret)
goto pq_mmu_fail;
- }
rcu_assign_pointer(fd->pq, pq);
fd->cq = cq;
@@ -249,8 +230,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
pq->wait,
!atomic_read(&pq->n_reqs));
kfree(pq->reqs);
- if (pq->handler)
- hfi1_mmu_rb_unregister(pq->handler);
+ hfi1_free_system_pinning(pq);
bitmap_free(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
flush_pq_iowait(pq);
@@ -821,8 +801,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
req->tidoffset += datalen;
req->sent += datalen;
while (datalen) {
- ret = add_system_pages_to_sdma_packet(req, tx, iovec,
- &datalen);
+ ret = hfi1_add_pages_to_sdma_packet(req, tx, iovec,
+ &datalen);
if (ret)
goto free_txreq;
iovec = &req->iovs[req->iov_idx];
@@ -860,17 +840,6 @@ free_tx:
return ret;
}
-static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
-{
- struct evict_data evict_data;
- struct mmu_rb_handler *handler = pq->handler;
-
- evict_data.cleared = 0;
- evict_data.target = npages;
- hfi1_mmu_rb_evict(handler, &evict_data);
- return evict_data.cleared;
-}
-
static int check_header_template(struct user_sdma_request *req,
struct hfi1_pkt_header *hdr, u32 lrhlen,
u32 datalen)
@@ -1253,401 +1222,3 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
idx, state, ret);
}
-
-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
- unsigned int start, unsigned int npages)
-{
- hfi1_release_user_pages(mm, pages + start, npages, false);
- kfree(pages);
-}
-
-static void free_system_node(struct sdma_mmu_node *node)
-{
- if (node->npages) {
- unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
- node->npages);
- atomic_sub(node->npages, &node->pq->n_locked);
- }
- kfree(node);
-}
-
-/*
- * kref_get()'s an additional kref on the returned rb_node to prevent rb_node
- * from being released until after rb_node is assigned to an SDMA descriptor
- * (struct sdma_desc) under add_system_iovec_to_sdma_packet(), even if the
- * virtual address range for rb_node is invalidated between now and then.
- */
-static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
- unsigned long start,
- unsigned long end)
-{
- struct mmu_rb_node *rb_node;
- unsigned long flags;
-
- spin_lock_irqsave(&handler->lock, flags);
- rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
- if (!rb_node) {
- spin_unlock_irqrestore(&handler->lock, flags);
- return NULL;
- }
-
- /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
- kref_get(&rb_node->refcount);
- spin_unlock_irqrestore(&handler->lock, flags);
-
- return container_of(rb_node, struct sdma_mmu_node, rb);
-}
-
-static int pin_system_pages(struct user_sdma_request *req,
- uintptr_t start_address, size_t length,
- struct sdma_mmu_node *node, int npages)
-{
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- int pinned, cleared;
- struct page **pages;
-
- pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
-retry:
- if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
- npages)) {
- SDMA_DBG(req, "Evicting: nlocked %u npages %u",
- atomic_read(&pq->n_locked), npages);
- cleared = sdma_cache_evict(pq, npages);
- if (cleared >= npages)
- goto retry;
- }
-
- SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
- start_address, node->npages, npages);
- pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
- pages);
-
- if (pinned < 0) {
- kfree(pages);
- SDMA_DBG(req, "pinned %d", pinned);
- return pinned;
- }
- if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, node->npages, pinned);
- SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
- return -EFAULT;
- }
- node->rb.addr = start_address;
- node->rb.len = length;
- node->pages = pages;
- node->npages = npages;
- atomic_add(pinned, &pq->n_locked);
- SDMA_DBG(req, "done. pinned %d", pinned);
- return 0;
-}
-
-/*
- * kref refcount on *node_p will be 2 on successful addition: one kref from
- * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being
- * released until after *node_p is assigned to an SDMA descriptor (struct
- * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual
- * address range for *node_p is invalidated between now and then.
- */
-static int add_system_pinning(struct user_sdma_request *req,
- struct sdma_mmu_node **node_p,
- unsigned long start, unsigned long len)
-
-{
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- struct sdma_mmu_node *node;
- int ret;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- /* First kref "moves" to mmu_rb_handler */
- kref_init(&node->rb.refcount);
-
- /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
- kref_get(&node->rb.refcount);
-
- node->pq = pq;
- ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
- if (ret == 0) {
- ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
- if (ret)
- free_system_node(node);
- else
- *node_p = node;
-
- return ret;
- }
-
- kfree(node);
- return ret;
-}
-
-static int get_system_cache_entry(struct user_sdma_request *req,
- struct sdma_mmu_node **node_p,
- size_t req_start, size_t req_len)
-{
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
- u64 end = PFN_ALIGN(req_start + req_len);
- struct mmu_rb_handler *handler = pq->handler;
- int ret;
-
- if ((end - start) == 0) {
- SDMA_DBG(req,
- "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
- req_start, req_len, start, end);
- return -EINVAL;
- }
-
- SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
-
- while (1) {
- struct sdma_mmu_node *node =
- find_system_node(handler, start, end);
- u64 prepend_len = 0;
-
- SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
- if (!node) {
- ret = add_system_pinning(req, node_p, start,
- end - start);
- if (ret == -EEXIST) {
- /*
- * Another execution context has inserted a
- * conficting entry first.
- */
- continue;
- }
- return ret;
- }
-
- if (node->rb.addr <= start) {
- /*
- * This entry covers at least part of the region. If it doesn't extend
- * to the end, then this will be called again for the next segment.
- */
- *node_p = node;
- return 0;
- }
-
- SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d",
- node->rb.addr, kref_read(&node->rb.refcount));
- prepend_len = node->rb.addr - start;
-
- /*
- * This node will not be returned, instead a new node
- * will be. So release the reference.
- */
- kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
-
- /* Prepend a node to cover the beginning of the allocation */
- ret = add_system_pinning(req, node_p, start, prepend_len);
- if (ret == -EEXIST) {
- /* Another execution context has inserted a conficting entry first. */
- continue;
- }
- return ret;
- }
-}
-
-static void sdma_mmu_rb_node_get(void *ctx)
-{
- struct mmu_rb_node *node = ctx;
-
- kref_get(&node->refcount);
-}
-
-static void sdma_mmu_rb_node_put(void *ctx)
-{
- struct sdma_mmu_node *node = ctx;
-
- kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
-}
-
-static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
- struct user_sdma_txreq *tx,
- struct sdma_mmu_node *cache_entry,
- size_t start,
- size_t from_this_cache_entry)
-{
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- unsigned int page_offset;
- unsigned int from_this_page;
- size_t page_index;
- void *ctx;
- int ret;
-
- /*
- * Because the cache may be more fragmented than the memory that is being accessed,
- * it's not strictly necessary to have a descriptor per cache entry.
- */
-
- while (from_this_cache_entry) {
- page_index = PFN_DOWN(start - cache_entry->rb.addr);
-
- if (page_index >= cache_entry->npages) {
- SDMA_DBG(req,
- "Request for page_index %zu >= cache_entry->npages %u",
- page_index, cache_entry->npages);
- return -EINVAL;
- }
-
- page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
- from_this_page = PAGE_SIZE - page_offset;
-
- if (from_this_page < from_this_cache_entry) {
- ctx = NULL;
- } else {
- /*
- * In the case they are equal the next line has no practical effect,
- * but it's better to do a register to register copy than a conditional
- * branch.
- */
- from_this_page = from_this_cache_entry;
- ctx = cache_entry;
- }
-
- ret = sdma_txadd_page(pq->dd, &tx->txreq,
- cache_entry->pages[page_index],
- page_offset, from_this_page,
- ctx,
- sdma_mmu_rb_node_get,
- sdma_mmu_rb_node_put);
- if (ret) {
- /*
- * When there's a failure, the entire request is freed by
- * user_sdma_send_pkts().
- */
- SDMA_DBG(req,
- "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
- ret, page_index, page_offset, from_this_page);
- return ret;
- }
- start += from_this_page;
- from_this_cache_entry -= from_this_page;
- }
- return 0;
-}
-
-static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
- struct user_sdma_txreq *tx,
- struct user_sdma_iovec *iovec,
- size_t from_this_iovec)
-{
- while (from_this_iovec > 0) {
- struct sdma_mmu_node *cache_entry;
- size_t from_this_cache_entry;
- size_t start;
- int ret;
-
- start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
- ret = get_system_cache_entry(req, &cache_entry, start,
- from_this_iovec);
- if (ret) {
- SDMA_DBG(req, "pin system segment failed %d", ret);
- return ret;
- }
-
- from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
- if (from_this_cache_entry > from_this_iovec)
- from_this_cache_entry = from_this_iovec;
-
- ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
- from_this_cache_entry);
-
- /*
- * Done adding cache_entry to zero or more sdma_desc. Can
- * kref_put() the "safety" kref taken under
- * get_system_cache_entry().
- */
- kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);
-
- if (ret) {
- SDMA_DBG(req, "add system segment failed %d", ret);
- return ret;
- }
-
- iovec->offset += from_this_cache_entry;
- from_this_iovec -= from_this_cache_entry;
- }
-
- return 0;
-}
-
-static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
- struct user_sdma_txreq *tx,
- struct user_sdma_iovec *iovec,
- u32 *pkt_data_remaining)
-{
- size_t remaining_to_add = *pkt_data_remaining;
- /*
- * Walk through iovec entries, ensure the associated pages
- * are pinned and mapped, add data to the packet until no more
- * data remains to be added.
- */
- while (remaining_to_add > 0) {
- struct user_sdma_iovec *cur_iovec;
- size_t from_this_iovec;
- int ret;
-
- cur_iovec = iovec;
- from_this_iovec = iovec->iov.iov_len - iovec->offset;
-
- if (from_this_iovec > remaining_to_add) {
- from_this_iovec = remaining_to_add;
- } else {
- /* The current iovec entry will be consumed by this pass. */
- req->iov_idx++;
- iovec++;
- }
-
- ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
- from_this_iovec);
- if (ret)
- return ret;
-
- remaining_to_add -= from_this_iovec;
- }
- *pkt_data_remaining = remaining_to_add;
-
- return 0;
-}
-
-static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
- unsigned long len)
-{
- return (bool)(node->addr == addr);
-}
-
-/*
- * Return 1 to remove the node from the rb tree and call the remove op.
- *
- * Called with the rb tree lock held.
- */
-static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
- void *evict_arg, bool *stop)
-{
- struct sdma_mmu_node *node =
- container_of(mnode, struct sdma_mmu_node, rb);
- struct evict_data *evict_data = evict_arg;
-
- /* this node will be evicted, add its pages to our count */
- evict_data->cleared += node->npages;
-
- /* have enough pages been cleared? */
- if (evict_data->cleared >= evict_data->target)
- *stop = true;
-
- return 1; /* remove this node */
-}
-
-static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
-{
- struct sdma_mmu_node *node =
- container_of(mnode, struct sdma_mmu_node, rb);
-
- free_system_node(node);
-}
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 548347d4c5bc..742ec1470cc5 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
- * Copyright(c) 2020 - Cornelis Networks, Inc.
+ * Copyright(c) 2023 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
*/
#ifndef _HFI1_USER_SDMA_H
@@ -13,6 +13,8 @@
#include "iowait.h"
#include "user_exp_rcv.h"
#include "mmu_rb.h"
+#include "pinning.h"
+#include "sdma.h"
/* The maximum number of Data io vectors per message/request */
#define MAX_VECTORS_PER_REQ 8
@@ -101,13 +103,6 @@ struct hfi1_user_sdma_comp_q {
struct hfi1_sdma_comp_entry *comps;
};
-struct sdma_mmu_node {
- struct mmu_rb_node rb;
- struct hfi1_user_sdma_pkt_q *pq;
- struct page **pages;
- unsigned int npages;
-};
-
struct user_sdma_iovec {
struct list_head list;
struct iovec iov;
@@ -203,10 +198,4 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
struct iovec *iovec, unsigned long dim,
unsigned long *count);
-
-static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
-{
- return node->rb.handler->mn.mm;
-}
-
#endif /* _HFI1_USER_SDMA_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 84239b907de2..7f0d0288beb1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -97,6 +97,7 @@
#define HNS_ROCE_CQ_BANK_NUM 4
#define CQ_BANKID_SHIFT 2
+#define CQ_BANKID_MASK GENMASK(1, 0)
enum {
SERV_TYPE_RC,
@@ -714,7 +715,6 @@ struct hns_roce_caps {
u32 max_rq_sg;
u32 rsv0;
u32 num_qps;
- u32 num_pi_qps;
u32 reserved_qps;
u32 num_srqs;
u32 max_wqes;
@@ -840,6 +840,32 @@ enum hns_roce_device_state {
HNS_ROCE_DEVICE_STATE_UNINIT,
};
+enum hns_roce_hw_pkt_stat_index {
+ HNS_ROCE_HW_RX_RC_PKT_CNT,
+ HNS_ROCE_HW_RX_UC_PKT_CNT,
+ HNS_ROCE_HW_RX_UD_PKT_CNT,
+ HNS_ROCE_HW_RX_XRC_PKT_CNT,
+ HNS_ROCE_HW_RX_PKT_CNT,
+ HNS_ROCE_HW_RX_ERR_PKT_CNT,
+ HNS_ROCE_HW_RX_CNP_PKT_CNT,
+ HNS_ROCE_HW_TX_RC_PKT_CNT,
+ HNS_ROCE_HW_TX_UC_PKT_CNT,
+ HNS_ROCE_HW_TX_UD_PKT_CNT,
+ HNS_ROCE_HW_TX_XRC_PKT_CNT,
+ HNS_ROCE_HW_TX_PKT_CNT,
+ HNS_ROCE_HW_TX_ERR_PKT_CNT,
+ HNS_ROCE_HW_TX_CNP_PKT_CNT,
+ HNS_ROCE_HW_TRP_GET_MPT_ERR_PKT_CNT,
+ HNS_ROCE_HW_TRP_GET_IRRL_ERR_PKT_CNT,
+ HNS_ROCE_HW_ECN_DB_CNT,
+ HNS_ROCE_HW_RX_BUF_CNT,
+ HNS_ROCE_HW_TRP_RX_SOF_CNT,
+ HNS_ROCE_HW_CQ_CQE_CNT,
+ HNS_ROCE_HW_CQ_POE_CNT,
+ HNS_ROCE_HW_CQ_NOTIFY_CNT,
+ HNS_ROCE_HW_CNT_TOTAL
+};
+
struct hns_roce_hw {
int (*cmq_init)(struct hns_roce_dev *hr_dev);
void (*cmq_exit)(struct hns_roce_dev *hr_dev);
@@ -882,6 +908,8 @@ struct hns_roce_hw {
int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
+ int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
+ u64 *stats, u32 port, int *hw_counters);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
@@ -1112,7 +1140,6 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
-void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
@@ -1161,9 +1188,6 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int hns_roce_create_srq(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
-int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask,
- struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
@@ -1206,7 +1230,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 47c0efed1821..c4ac06a33869 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -78,7 +78,7 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
return false;
}
- return hop_num ? true : false;
+ return hop_num;
}
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8f7eb11066b4..d82daff2d9bd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -750,7 +750,8 @@ out:
qp->sq.head += nreq;
qp->next_sge = sge_idx;
- if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
+ if (nreq == 1 && !ret &&
+ (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
write_dwqe(hr_dev, qp, wqe);
else
update_sq_db(hr_dev, qp);
@@ -1612,6 +1613,56 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
return 0;
}
+static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
+ u64 *stats, u32 port, int *num_counters)
+{
+#define CNT_PER_DESC 3
+ struct hns_roce_cmq_desc *desc;
+ int bd_idx, cnt_idx;
+ __le64 *cnt_data;
+ int desc_num;
+ int ret;
+ int i;
+
+ if (port > hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ desc_num = DIV_ROUND_UP(HNS_ROCE_HW_CNT_TOTAL, CNT_PER_DESC);
+ desc = kcalloc(desc_num, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (i = 0; i < desc_num; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i],
+ HNS_ROCE_OPC_QUERY_COUNTER, true);
+ if (i != desc_num - 1)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ }
+
+ ret = hns_roce_cmq_send(hr_dev, desc, desc_num);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to get counter, ret = %d.\n", ret);
+ goto err_out;
+ }
+
+ for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
+ bd_idx = i / CNT_PER_DESC;
+ if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) &&
+ bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC)
+ break;
+
+ cnt_data = (__le64 *)&desc[bd_idx].data[0];
+ cnt_idx = i % CNT_PER_DESC;
+ stats[i] = le64_to_cpu(cnt_data[cnt_idx]);
+ }
+ *num_counters = i;
+
+err_out:
+ kfree(desc);
+ return ret;
+}
+
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
@@ -1680,29 +1731,6 @@ static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
return 0;
}
-static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
-{
- struct hns_roce_cmq_desc desc;
- struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
- struct hns_roce_caps *caps = &hr_dev->caps;
- u32 func_num, qp_num;
- int ret;
-
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
- ret = hns_roce_cmq_send(hr_dev, &desc, 1);
- if (ret)
- return ret;
-
- func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
- qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
- caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
-
- qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
- caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
-
- return 0;
-}
-
static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
@@ -1723,50 +1751,37 @@ static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
return 0;
}
-static int query_func_resource_caps(struct hns_roce_dev *hr_dev, bool is_vf)
+static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
struct device *dev = hr_dev->dev;
int ret;
- ret = load_func_res_caps(hr_dev, is_vf);
+ ret = load_func_res_caps(hr_dev, false);
if (ret) {
- dev_err(dev, "failed to load res caps, ret = %d (%s).\n", ret,
- is_vf ? "vf" : "pf");
+ dev_err(dev, "failed to load pf res caps, ret = %d.\n", ret);
return ret;
}
- if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- ret = load_ext_cfg_caps(hr_dev, is_vf);
- if (ret)
- dev_err(dev, "failed to load ext cfg, ret = %d (%s).\n",
- ret, is_vf ? "vf" : "pf");
- }
+ ret = load_pf_timer_res_caps(hr_dev);
+ if (ret)
+ dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
+ ret);
return ret;
}
-static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
{
struct device *dev = hr_dev->dev;
int ret;
- ret = query_func_resource_caps(hr_dev, false);
+ ret = load_func_res_caps(hr_dev, true);
if (ret)
- return ret;
-
- ret = load_pf_timer_res_caps(hr_dev);
- if (ret)
- dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
- ret);
+ dev_err(dev, "failed to load vf res caps, ret = %d.\n", ret);
return ret;
}
-static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
-{
- return query_func_resource_caps(hr_dev, true);
-}
-
static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
u32 vf_id)
{
@@ -1849,24 +1864,6 @@ static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
return hns_roce_cmq_send(hr_dev, desc, 2);
}
-static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
-{
- struct hns_roce_cmq_desc desc;
- struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
- struct hns_roce_caps *caps = &hr_dev->caps;
-
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);
-
- hr_reg_write(req, EXT_CFG_VF_ID, vf_id);
-
- hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
- hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
- hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
- hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);
-
- return hns_roce_cmq_send(hr_dev, &desc, 1);
-}
-
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
u32 func_num = max_t(u32, 1, hr_dev->func_num);
@@ -1881,16 +1878,6 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
vf_id, ret);
return ret;
}
-
- if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- ret = config_vf_ext_resource(hr_dev, vf_id);
- if (ret) {
- dev_err(hr_dev->dev,
- "failed to config vf-%u ext res, ret = %d.\n",
- vf_id, ret);
- return ret;
- }
- }
}
return 0;
@@ -2075,9 +2062,6 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
- caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
-
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
@@ -2200,6 +2184,7 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS);
caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID);
caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH);
+ caps->num_xrcds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_XRCDS);
caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS);
caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS);
caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD);
@@ -2220,6 +2205,7 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS);
caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT);
caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS);
+ caps->reserved_xrcds = hr_reg_read(resp_e, PF_CAPS_E_RSV_XRCDS);
caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
@@ -6646,6 +6632,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.query_cqc = hns_roce_v2_query_cqc,
.query_qpc = hns_roce_v2_query_qpc,
.query_mpt = hns_roce_v2_query_mpt,
+ .query_hw_counter = hns_roce_hw_v2_query_counter,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
@@ -6722,14 +6709,14 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
ret = hns_roce_init(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
- goto error_failed_cfg;
+ goto error_failed_roce_init;
}
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
ret = free_mr_init(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "failed to init free mr!\n");
- goto error_failed_roce_init;
+ goto error_failed_free_mr_init;
}
}
@@ -6737,10 +6724,10 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
return 0;
-error_failed_roce_init:
+error_failed_free_mr_init:
hns_roce_exit(hr_dev);
-error_failed_cfg:
+error_failed_roce_init:
kfree(hr_dev->priv);
error_failed_kzalloc:
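
hns_roce_hw_v2_query_counter() above gathers HNS_ROCE_HW_CNT_TOTAL 64-bit counters from a chain of command descriptors, each carrying CNT_PER_DESC (3) values, with every descriptor but the last flagged NEXT; the driver also stops early if the chain ends sooner than expected. A standalone sketch of the packing arithmetic only — the descriptor layout, flag value, and the counter total of 22 (matching the enum added in hns_roce_device.h) are illustrative simplifications, not the cmq format:

	#include <stdint.h>
	#include <stdio.h>

	#define CNT_PER_DESC	3
	#define CNT_TOTAL	22	/* HNS_ROCE_HW_CNT_TOTAL in the driver */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Illustrative descriptor: 3 x 64-bit payload slots, like the cmq data area. */
	struct demo_desc {
		uint16_t flag;
		uint64_t data[CNT_PER_DESC];
	};
	#define FLAG_NEXT	0x0004

	int main(void)
	{
		struct demo_desc desc[DIV_ROUND_UP(CNT_TOTAL, CNT_PER_DESC)] = { 0 };
		uint64_t stats[CNT_TOTAL];
		int desc_num = DIV_ROUND_UP(CNT_TOTAL, CNT_PER_DESC);
		int i;

		/* "Firmware" side (simulated): fill the chained descriptors. */
		for (i = 0; i < desc_num; i++) {
			if (i != desc_num - 1)
				desc[i].flag |= FLAG_NEXT;
			for (int j = 0; j < CNT_PER_DESC; j++)
				desc[i].data[j] = 100 * i + j;
		}

		/* Driver side: i / CNT_PER_DESC picks the descriptor, i % CNT_PER_DESC the slot. */
		for (i = 0; i < CNT_TOTAL; i++)
			stats[i] = desc[i / CNT_PER_DESC].data[i % CNT_PER_DESC];

		printf("stat[0]=%llu stat[%d]=%llu\n",
		       (unsigned long long)stats[0], CNT_TOTAL - 1,
		       (unsigned long long)stats[CNT_TOTAL - 1]);
		return 0;
	}
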
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 7033eae2407c..cd97cbee682a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -42,7 +42,6 @@
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000
-#define HNS_ROCE_V2_RSV_XRCD_NUM 0
#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08 10
@@ -199,6 +198,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_QUERY_HW_VER = 0x8000,
HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001,
HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
+ HNS_ROCE_OPC_QUERY_COUNTER = 0x8206,
HNS_ROCE_OPC_QUERY_PF_RES = 0x8400,
HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
@@ -220,7 +220,6 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_QUERY_VF_RES = 0x850e,
HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
- HNS_ROCE_OPC_EXT_CFG = 0x8512,
HNS_ROCE_QUERY_RAM_ECC = 0x8513,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
};
@@ -957,15 +956,6 @@ struct hns_roce_func_clear {
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL 40
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT 20
-/* Fields of HNS_ROCE_OPC_EXT_CFG */
-#define EXT_CFG_VF_ID CMQ_REQ_FIELD_LOC(31, 0)
-#define EXT_CFG_QP_PI_IDX CMQ_REQ_FIELD_LOC(45, 32)
-#define EXT_CFG_QP_PI_NUM CMQ_REQ_FIELD_LOC(63, 48)
-#define EXT_CFG_QP_NUM CMQ_REQ_FIELD_LOC(87, 64)
-#define EXT_CFG_QP_IDX CMQ_REQ_FIELD_LOC(119, 96)
-#define EXT_CFG_LLM_IDX CMQ_REQ_FIELD_LOC(139, 128)
-#define EXT_CFG_LLM_NUM CMQ_REQ_FIELD_LOC(156, 144)
-
#define CFG_LLM_A_BA_L CMQ_REQ_FIELD_LOC(31, 0)
#define CFG_LLM_A_BA_H CMQ_REQ_FIELD_LOC(63, 32)
#define CFG_LLM_A_DEPTH CMQ_REQ_FIELD_LOC(76, 64)
@@ -1202,6 +1192,7 @@ struct hns_roce_query_pf_caps_c {
#define PF_CAPS_C_NUM_CQS PF_CAPS_C_FIELD_LOC(51, 32)
#define PF_CAPS_C_MAX_GID PF_CAPS_C_FIELD_LOC(60, 52)
#define PF_CAPS_C_CQ_DEPTH PF_CAPS_C_FIELD_LOC(86, 64)
+#define PF_CAPS_C_NUM_XRCDS PF_CAPS_C_FIELD_LOC(91, 87)
#define PF_CAPS_C_NUM_MRWS PF_CAPS_C_FIELD_LOC(115, 96)
#define PF_CAPS_C_NUM_QPS PF_CAPS_C_FIELD_LOC(147, 128)
#define PF_CAPS_C_MAX_ORD PF_CAPS_C_FIELD_LOC(155, 148)
@@ -1260,6 +1251,7 @@ struct hns_roce_query_pf_caps_e {
#define PF_CAPS_E_RSV_MRWS PF_CAPS_E_FIELD_LOC(19, 0)
#define PF_CAPS_E_CHUNK_SIZE_SHIFT PF_CAPS_E_FIELD_LOC(31, 20)
#define PF_CAPS_E_RSV_CQS PF_CAPS_E_FIELD_LOC(51, 32)
+#define PF_CAPS_E_RSV_XRCDS PF_CAPS_E_FIELD_LOC(63, 52)
#define PF_CAPS_E_RSV_SRQS PF_CAPS_E_FIELD_LOC(83, 64)
#define PF_CAPS_E_RSV_LKEYS PF_CAPS_E_FIELD_LOC(115, 96)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 485e110ca433..d9d546cdef52 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -219,6 +219,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
unsigned long flags;
enum ib_mtu mtu;
u32 port;
+ int ret;
port = port_num - 1;
@@ -231,8 +232,10 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
IB_PORT_BOOT_MGMT_SUP;
props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
props->pkey_tbl_len = 1;
- props->active_width = IB_WIDTH_4X;
- props->active_speed = 1;
+ ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
+ &props->active_width);
+ if (ret)
+ ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
@@ -512,6 +515,83 @@ static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
sub_minor);
}
+#define HNS_ROCE_HW_CNT(ename, cname) \
+ [HNS_ROCE_HW_##ename##_CNT].name = cname
+
+static const struct rdma_stat_desc hns_roce_port_stats_descs[] = {
+ HNS_ROCE_HW_CNT(RX_RC_PKT, "rx_rc_pkt"),
+ HNS_ROCE_HW_CNT(RX_UC_PKT, "rx_uc_pkt"),
+ HNS_ROCE_HW_CNT(RX_UD_PKT, "rx_ud_pkt"),
+ HNS_ROCE_HW_CNT(RX_XRC_PKT, "rx_xrc_pkt"),
+ HNS_ROCE_HW_CNT(RX_PKT, "rx_pkt"),
+ HNS_ROCE_HW_CNT(RX_ERR_PKT, "rx_err_pkt"),
+ HNS_ROCE_HW_CNT(RX_CNP_PKT, "rx_cnp_pkt"),
+ HNS_ROCE_HW_CNT(TX_RC_PKT, "tx_rc_pkt"),
+ HNS_ROCE_HW_CNT(TX_UC_PKT, "tx_uc_pkt"),
+ HNS_ROCE_HW_CNT(TX_UD_PKT, "tx_ud_pkt"),
+ HNS_ROCE_HW_CNT(TX_XRC_PKT, "tx_xrc_pkt"),
+ HNS_ROCE_HW_CNT(TX_PKT, "tx_pkt"),
+ HNS_ROCE_HW_CNT(TX_ERR_PKT, "tx_err_pkt"),
+ HNS_ROCE_HW_CNT(TX_CNP_PKT, "tx_cnp_pkt"),
+ HNS_ROCE_HW_CNT(TRP_GET_MPT_ERR_PKT, "trp_get_mpt_err_pkt"),
+ HNS_ROCE_HW_CNT(TRP_GET_IRRL_ERR_PKT, "trp_get_irrl_err_pkt"),
+ HNS_ROCE_HW_CNT(ECN_DB, "ecn_doorbell"),
+ HNS_ROCE_HW_CNT(RX_BUF, "rx_buffer"),
+ HNS_ROCE_HW_CNT(TRP_RX_SOF, "trp_rx_sof"),
+ HNS_ROCE_HW_CNT(CQ_CQE, "cq_cqe"),
+ HNS_ROCE_HW_CNT(CQ_POE, "cq_poe"),
+ HNS_ROCE_HW_CNT(CQ_NOTIFY, "cq_notify"),
+};
+
+static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
+ struct ib_device *device, u32 port_num)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ u32 port = port_num - 1;
+
+ if (port > hr_dev->caps.num_ports) {
+ ibdev_err(device, "invalid port num.\n");
+ return NULL;
+ }
+
+ if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
+ hr_dev->is_vf)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
+ ARRAY_SIZE(hns_roce_port_stats_descs),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int hns_roce_get_hw_stats(struct ib_device *device,
+ struct rdma_hw_stats *stats,
+ u32 port, int index)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ int num_counters = HNS_ROCE_HW_CNT_TOTAL;
+ int ret;
+
+ if (port == 0)
+ return 0;
+
+ if (port > hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
+ hr_dev->is_vf)
+ return -EOPNOTSUPP;
+
+ ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
+ &num_counters);
+ if (ret) {
+ ibdev_err(device, "failed to query hw counter, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return num_counters;
+}
+
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
@@ -554,6 +634,8 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.query_pkey = hns_roce_query_pkey,
.query_port = hns_roce_query_port,
.reg_user_mr = hns_roce_reg_user_mr,
+ .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
+ .get_hw_stats = hns_roce_get_hw_stats,
INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
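
The HNS_ROCE_HW_CNT() macro above combines token pasting with designated array initializers so that each rdma_stat_desc entry lands at the index named by its counter enum, keeping the name table aligned with the enum order. A small standalone illustration of the same pattern, with simplified stand-in types rather than the rdma core structures:

	#include <stdio.h>

	enum demo_cnt {
		DEMO_RX_PKT_CNT,
		DEMO_TX_PKT_CNT,
		DEMO_ERR_PKT_CNT,
		DEMO_CNT_TOTAL
	};

	struct demo_stat_desc {
		const char *name;
	};

	/* Same shape as HNS_ROCE_HW_CNT(): paste the enum name, initialize by index. */
	#define DEMO_CNT(ename, cname) \
		[DEMO_##ename##_CNT].name = cname

	static const struct demo_stat_desc demo_descs[] = {
		DEMO_CNT(RX_PKT, "rx_pkt"),
		DEMO_CNT(TX_PKT, "tx_pkt"),
		DEMO_CNT(ERR_PKT, "err_pkt"),
	};

	int main(void)
	{
		for (int i = 0; i < DEMO_CNT_TOTAL; i++)
			printf("%d: %s\n", i, demo_descs[i].name);
		return 0;
	}
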
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d855a917f4cf..cdc1c6de43a1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -170,14 +170,29 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
}
}
-static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
+static u8 get_affinity_cq_bank(u8 qp_bank)
{
- u32 least_load = bank[0].inuse;
+ return (qp_bank >> 1) & CQ_BANKID_MASK;
+}
+
+static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
+ struct hns_roce_bank *bank)
+{
+#define INVALID_LOAD_QPNUM 0xFFFFFFFF
+ struct ib_cq *scq = init_attr->send_cq;
+ u32 least_load = INVALID_LOAD_QPNUM;
+ unsigned long cqn = 0;
u8 bankid = 0;
u32 bankcnt;
u8 i;
- for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
+ if (scq)
+ cqn = to_hr_cq(scq)->cqn;
+
+ for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
+ if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
+ continue;
+
bankcnt = bank[i].inuse;
if (bankcnt < least_load) {
least_load = bankcnt;
@@ -209,7 +224,8 @@ static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
return 0;
}
-static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
unsigned long num = 0;
@@ -220,7 +236,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
num = 1;
} else {
mutex_lock(&qp_table->bank_mutex);
- bankid = get_least_load_bankid_for_qp(qp_table->bank);
+ bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);
ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
&num);
@@ -1082,7 +1098,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_buf;
}
- ret = alloc_qpn(hr_dev, hr_qp);
+ ret = alloc_qpn(hr_dev, hr_qp, init_attr);
if (ret) {
ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
goto err_qpn;
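
With this change alloc_qpn() prefers the least-loaded QP bank whose affinity CQ bank — bits 2:1 of the QP bank id — matches the send CQ's bank (cqn & CQ_BANKID_MASK). A compact userspace sketch of that selection loop; the bank load counts are simulated, and the constants assume the driver's usual 8 QP banks alongside the 4 CQ banks defined above:

	#include <stdint.h>
	#include <stdio.h>

	#define QP_BANK_NUM	8	/* assumed HNS_ROCE_QP_BANK_NUM */
	#define CQ_BANKID_MASK	0x3	/* GENMASK(1, 0) */

	/* Affinity CQ bank for a QP bank id, as in get_affinity_cq_bank(). */
	static uint8_t affinity_cq_bank(uint8_t qp_bank)
	{
		return (qp_bank >> 1) & CQ_BANKID_MASK;
	}

	/*
	 * Pick the least-loaded QP bank; if have_scq, restrict the search to
	 * banks whose affinity CQ bank matches the send CQ's bank.
	 */
	static uint8_t pick_qp_bank(const uint32_t inuse[QP_BANK_NUM],
				    int have_scq, unsigned long cqn)
	{
		uint32_t least = UINT32_MAX;	/* INVALID_LOAD_QPNUM in the driver */
		uint8_t bankid = 0;

		for (uint8_t i = 0; i < QP_BANK_NUM; i++) {
			if (have_scq && affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK))
				continue;
			if (inuse[i] < least) {
				least = inuse[i];
				bankid = i;
			}
		}
		return bankid;
	}

	int main(void)
	{
		uint32_t inuse[QP_BANK_NUM] = { 5, 1, 7, 0, 2, 9, 3, 4 };

		/* cqn = 6 -> CQ bank 2 -> only QP banks 4 and 5 qualify; bank 4 wins. */
		printf("picked bank %u\n", pick_qp_bank(inuse, 1, 6));
		return 0;
	}
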
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 989a2af2e938..081a01de3055 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -9,8 +9,6 @@
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
-#define MAX_ENTRY_NUM 256
-
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -47,8 +45,6 @@ int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
struct hns_roce_v2_cq_context context;
- u32 data[MAX_ENTRY_NUM] = {};
- int offset = 0;
int ret;
if (!hr_dev->hw->query_cqc)
@@ -58,23 +54,7 @@ int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
if (ret)
return -EINVAL;
- data[offset++] = hr_reg_read(&context, CQC_CQ_ST);
- data[offset++] = hr_reg_read(&context, CQC_SHIFT);
- data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE);
- data[offset++] = hr_reg_read(&context, CQC_CQE_CNT);
- data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX);
- data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX);
- data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN);
- data[offset++] = hr_reg_read(&context, CQC_ARM_ST);
- data[offset++] = hr_reg_read(&context, CQC_CMD_SN);
- data[offset++] = hr_reg_read(&context, CQC_CEQN);
- data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT);
- data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD);
- data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM);
- data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ);
- data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ);
-
- ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
}
@@ -118,8 +98,6 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
struct hns_roce_v2_qp_context context;
- u32 data[MAX_ENTRY_NUM] = {};
- int offset = 0;
int ret;
if (!hr_dev->hw->query_qpc)
@@ -129,42 +107,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
if (ret)
return -EINVAL;
- data[offset++] = hr_reg_read(&context, QPC_QP_ST);
- data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE);
- data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG);
- data[offset++] = hr_reg_read(&context, QPC_SRQ_EN);
- data[offset++] = hr_reg_read(&context, QPC_SRQN);
- data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD);
- data[offset++] = hr_reg_read(&context, QPC_TX_CQN);
- data[offset++] = hr_reg_read(&context, QPC_RX_CQN);
- data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX);
- data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX);
- data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN);
- data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX);
- data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX);
- data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT);
- data[offset++] = hr_reg_read(&context, QPC_RQWS);
- data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT);
- data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT);
- data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM);
- data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM);
- data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM);
- data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ);
- data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ);
- data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
- data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT);
- data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN);
- data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN);
- data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX);
- data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX);
- data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR);
- data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR);
- data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR);
- data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR);
- data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX);
- data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR);
-
- ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
}
@@ -204,8 +147,6 @@ int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
struct hns_roce_v2_mpt_entry context;
- u32 data[MAX_ENTRY_NUM] = {};
- int offset = 0;
int ret;
if (!hr_dev->hw->query_mpt)
@@ -215,17 +156,7 @@ int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
if (ret)
return -EINVAL;
- data[offset++] = hr_reg_read(&context, MPT_ST);
- data[offset++] = hr_reg_read(&context, MPT_PD);
- data[offset++] = hr_reg_read(&context, MPT_LKEY);
- data[offset++] = hr_reg_read(&context, MPT_LEN_L);
- data[offset++] = hr_reg_read(&context, MPT_LEN_H);
- data[offset++] = hr_reg_read(&context, MPT_PBL_SIZE);
- data[offset++] = hr_reg_read(&context, MPT_PBL_HOP_NUM);
- data[offset++] = hr_reg_read(&context, MPT_PBL_BA_PG_SZ);
- data[offset++] = hr_reg_read(&context, MPT_PBL_BUF_PG_SZ);
-
- ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
}
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 70009b970e08..42d1e9771066 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1555,22 +1555,56 @@ static int irdma_del_multiple_qhash(struct irdma_device *iwdev,
return ret;
}
+static u8 irdma_iw_get_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4)
+{
+ struct net_device *ndev = NULL;
+
+ rcu_read_lock();
+ if (ipv4) {
+ ndev = ip_dev_find(&init_net, htonl(loc_addr[0]));
+ } else if (IS_ENABLED(CONFIG_IPV6)) {
+ struct net_device *ip_dev;
+ struct in6_addr laddr6;
+
+ irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, loc_addr);
+
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ ndev = ip_dev;
+ break;
+ }
+ }
+ }
+
+ if (!ndev)
+ goto done;
+ if (is_vlan_dev(ndev))
+ prio = (vlan_dev_get_egress_qos_mask(ndev, prio) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ if (ipv4)
+ dev_put(ndev);
+
+done:
+ rcu_read_unlock();
+
+ return prio;
+}
+
/**
- * irdma_netdev_vlan_ipv6 - Gets the netdev and mac
+ * irdma_get_vlan_mac_ipv6 - Gets the vlan and mac
* @addr: local IPv6 address
* @vlan_id: vlan id for the given IPv6 address
* @mac: mac address for the given IPv6 address
*
- * Returns the net_device of the IPv6 address and also sets the
- * vlan id and mac for that address.
+ * Returns the vlan id and mac for an IPv6 address.
*/
-struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
struct net_device *ip_dev = NULL;
struct in6_addr laddr6;
if (!IS_ENABLED(CONFIG_IPV6))
- return NULL;
+ return;
irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
if (vlan_id)
@@ -1589,8 +1623,6 @@ struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
}
}
rcu_read_unlock();
-
- return ip_dev;
}
/**
@@ -1667,6 +1699,12 @@ static int irdma_add_mqh_6(struct irdma_device *iwdev,
ifp->addr.in6_u.u6_addr32);
memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
sizeof(cm_info->loc_addr));
+ if (!iwdev->vsi.dscp_mode)
+ cm_info->user_pri =
+ irdma_iw_get_vlan_prio(child_listen_node->loc_addr,
+ cm_info->user_pri,
+ false);
+
ret = irdma_manage_qhash(iwdev, cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
@@ -1751,6 +1789,11 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
ntohl(ifa->ifa_address);
memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
sizeof(cm_info->loc_addr));
+ if (!iwdev->vsi.dscp_mode)
+ cm_info->user_pri =
+ irdma_iw_get_vlan_prio(child_listen_node->loc_addr,
+ cm_info->user_pri,
+ true);
ret = irdma_manage_qhash(iwdev, cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
@@ -2219,6 +2262,10 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
} else {
cm_node->tos = max(listener->tos, cm_info->tos);
cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ cm_node->user_pri =
+ irdma_iw_get_vlan_prio(cm_info->loc_addr,
+ cm_node->user_pri,
+ cm_info->ipv4);
}
ibdev_dbg(&iwdev->ibdev,
"DCB: listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
@@ -3577,7 +3624,6 @@ void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
iwqp->ietf_mem.size, iwqp->ietf_mem.va,
iwqp->ietf_mem.pa);
iwqp->ietf_mem.va = NULL;
- iwqp->ietf_mem.va = NULL;
}
}
@@ -3617,8 +3663,8 @@ int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->vlan_id = irdma_get_vlan_ipv4(cm_node->loc_addr);
} else {
cm_node->ipv4 = false;
- irdma_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
- NULL);
+ irdma_get_vlan_mac_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
+ NULL);
}
ibdev_dbg(&iwdev->ibdev, "CM: Accept vlan_id=%d\n",
cm_node->vlan_id);
@@ -3826,17 +3872,21 @@ int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
raddr6->sin6_addr.in6_u.u6_addr32);
cm_info.loc_port = ntohs(laddr6->sin6_port);
cm_info.rem_port = ntohs(raddr6->sin6_port);
- irdma_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
- NULL);
+ irdma_get_vlan_mac_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
+ NULL);
}
cm_info.cm_id = cm_id;
cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
cm_info.tos = cm_id->tos;
- if (iwdev->vsi.dscp_mode)
+ if (iwdev->vsi.dscp_mode) {
cm_info.user_pri =
iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
- else
+ } else {
cm_info.user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
+ }
if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
return -ENOMEM;
@@ -3952,8 +4002,8 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
laddr6->sin6_addr.in6_u.u6_addr32);
cm_info.loc_port = ntohs(laddr6->sin6_port);
if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY) {
- irdma_netdev_vlan_ipv6(cm_info.loc_addr,
- &cm_info.vlan_id, NULL);
+ irdma_get_vlan_mac_ipv6(cm_info.loc_addr,
+ &cm_info.vlan_id, NULL);
} else {
cm_info.vlan_id = 0xFFFF;
wildcard = true;
@@ -3980,7 +4030,7 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_listen_node->tos = cm_id->tos;
if (iwdev->vsi.dscp_mode)
cm_listen_node->user_pri =
- iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
+ iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
else
cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
cm_info.user_pri = cm_listen_node->user_pri;
@@ -3990,6 +4040,12 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
if (err)
goto error;
} else {
+ if (!iwdev->vsi.dscp_mode)
+ cm_listen_node->user_pri =
+ irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
+ cm_info.user_pri = cm_listen_node->user_pri;
err = irdma_manage_qhash(iwdev, &cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 45e3344daa04..8a6200e55c54 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -1061,6 +1061,9 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
u64 hdr;
enum irdma_page_size page_size;
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
if (info->page_size == 0x40000000)
page_size = IRDMA_PAGE_SIZE_1G;
else if (info->page_size == 0x200000)
@@ -1126,6 +1129,9 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
u8 addr_type;
enum irdma_page_size page_size;
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
if (info->page_size == 0x40000000)
page_size = IRDMA_PAGE_SIZE_1G;
else if (info->page_size == 0x200000)
@@ -1301,7 +1307,6 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
sq_info.wr_id = info->wr_id;
sq_info.signaled = info->signaled;
- sq_info.push_wqe = info->push_wqe;
wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
@@ -1335,7 +1340,6 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -1346,13 +1350,9 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
wqe, IRDMA_QP_WQE_MIN_SIZE, false);
- if (sq_info.push_wqe) {
- irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
- wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(&qp->qp_uk);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(&qp->qp_uk);
return 0;
}
@@ -4007,7 +4007,6 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
{
u64 temp, compl_ctx;
__le64 *aeqe;
- u16 wqe_idx;
u8 ae_src;
u8 polarity;
@@ -4027,7 +4026,7 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
aeqe, 16, false);
ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
- wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
@@ -4110,7 +4109,6 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_SOURCE_RQ_0011:
info->qp = true;
info->rq = true;
- info->wqe_idx = wqe_idx;
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_CQ:
@@ -4124,7 +4122,6 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_SOURCE_SQ_0111:
info->qp = true;
info->sq = true;
- info->wqe_idx = wqe_idx;
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_IN_RR_WR:
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 457368e324e1..7cbdd5433dba 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -219,7 +219,6 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
struct irdma_aeqe_info *info = &aeinfo;
int ret;
struct irdma_qp *iwqp = NULL;
- struct irdma_sc_cq *cq = NULL;
struct irdma_cq *iwcq = NULL;
struct irdma_sc_qp *qp = NULL;
struct irdma_qp_host_ctx_info *ctx_info = NULL;
@@ -336,10 +335,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
ibdev_err(&iwdev->ibdev,
"Processing an iWARP related AE for CQ misc = 0x%04X\n",
info->ae_id);
- cq = (struct irdma_sc_cq *)(unsigned long)
- info->compl_ctx;
- iwcq = cq->back_cq;
+ spin_lock_irqsave(&rf->cqtable_lock, flags);
+ iwcq = rf->cq_table[info->qp_cq_id];
+ if (!iwcq) {
+ spin_unlock_irqrestore(&rf->cqtable_lock,
+ flags);
+ ibdev_dbg(to_ibdev(dev),
+ "cq_id %d is already freed\n", info->qp_cq_id);
+ continue;
+ }
+ irdma_cq_add_ref(&iwcq->ibcq);
+ spin_unlock_irqrestore(&rf->cqtable_lock, flags);
if (iwcq->ibcq.event_handler) {
struct ib_event ibevent;
@@ -350,6 +357,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
iwcq->ibcq.event_handler(&ibevent,
iwcq->ibcq.cq_context);
}
+ irdma_cq_rem_ref(&iwcq->ibcq);
break;
case IRDMA_AE_RESET_NOT_SENT:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
@@ -563,12 +571,11 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
/**
* irdma_destroy_cqp - destroy control qp
* @rf: RDMA PCI function
- * @free_hwcqp: 1 if hw cqp should be freed
*
* Issue destroy cqp request and
* free the resources associated with the cqp
*/
-static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
+static void irdma_destroy_cqp(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_cqp *cqp = &rf->cqp;
@@ -576,8 +583,8 @@ static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
if (rf->cqp_cmpl_wq)
destroy_workqueue(rf->cqp_cmpl_wq);
- if (free_hwcqp)
- status = irdma_sc_cqp_destroy(dev->cqp);
+
+ status = irdma_sc_cqp_destroy(dev->cqp);
if (status)
ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
@@ -921,8 +928,8 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
if (!cqp->scratch_array) {
- kfree(cqp->cqp_requests);
- return -ENOMEM;
+ status = -ENOMEM;
+ goto err_scratch;
}
dev->cqp = &cqp->sc_cqp;
@@ -932,15 +939,14 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
&cqp->sq.pa, GFP_KERNEL);
if (!cqp->sq.va) {
- kfree(cqp->scratch_array);
- kfree(cqp->cqp_requests);
- return -ENOMEM;
+ status = -ENOMEM;
+ goto err_sq;
}
status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
IRDMA_HOST_CTX_ALIGNMENT_M);
if (status)
- goto exit;
+ goto err_ctx;
dev->cqp->host_ctx_pa = mem.pa;
dev->cqp->host_ctx = mem.va;
@@ -966,7 +972,7 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
if (status) {
ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
- goto exit;
+ goto err_ctx;
}
spin_lock_init(&cqp->req_lock);
@@ -977,7 +983,7 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
ibdev_dbg(to_ibdev(dev),
"ERR: cqp create failed - status %d maj_err %d min_err %d\n",
status, maj_err, min_err);
- goto exit;
+ goto err_ctx;
}
INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
@@ -991,8 +997,16 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
init_waitqueue_head(&cqp->remove_wq);
return 0;
-exit:
- irdma_destroy_cqp(rf, false);
+err_ctx:
+ dma_free_coherent(dev->hw->device, cqp->sq.size,
+ cqp->sq.va, cqp->sq.pa);
+ cqp->sq.va = NULL;
+err_sq:
+ kfree(cqp->scratch_array);
+ cqp->scratch_array = NULL;
+err_scratch:
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
return status;
}
@@ -1549,7 +1563,7 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
kfree(dev->hmc_info->sd_table.sd_entry);
dev->hmc_info->sd_table.sd_entry = NULL;
- kfree(rf->mem_rsrc);
+ vfree(rf->mem_rsrc);
rf->mem_rsrc = NULL;
dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
rf->obj_mem.pa);
@@ -1747,7 +1761,7 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
rf->reset, rf->rdma_ver);
fallthrough;
case CQP_CREATED:
- irdma_destroy_cqp(rf, true);
+ irdma_destroy_cqp(rf);
fallthrough;
case INITIAL_STATE:
irdma_del_init_mem(rf);
@@ -1945,10 +1959,12 @@ static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
rf->qp_table = (struct irdma_qp **)
(&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
+ rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
spin_lock_init(&rf->rsrc_lock);
spin_lock_init(&rf->arp_lock);
spin_lock_init(&rf->qptable_lock);
+ spin_lock_init(&rf->cqtable_lock);
spin_lock_init(&rf->qh_list_lock);
}
@@ -1969,6 +1985,7 @@ static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
+ rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
return rsrc_size;
}
@@ -2002,10 +2019,10 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
rf->max_mcg = rf->max_qp;
rsrc_size = irdma_calc_mem_rsrc_size(rf);
- rf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL);
+ rf->mem_rsrc = vzalloc(rsrc_size);
if (!rf->mem_rsrc) {
ret = -ENOMEM;
- goto mem_rsrc_kzalloc_fail;
+ goto mem_rsrc_vzalloc_fail;
}
rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
@@ -2033,7 +2050,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
return 0;
-mem_rsrc_kzalloc_fail:
+mem_rsrc_vzalloc_fail:
bitmap_free(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
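
irdma_initialize_hw_rsrc() sizes one resource blob and carves it into consecutive arrays; this patch appends the new cq_table directly after qp_table in both the size calculation and the carve, and switches the blob to vzalloc() so the now-larger allocation no longer has to be physically contiguous. A simplified userspace sketch of the carve pattern, with illustrative field names and calloc() standing in for vzalloc():

	#include <stdio.h>
	#include <stdlib.h>

	struct demo_qp;
	struct demo_cq;

	struct demo_rf {
		unsigned int max_qp;
		unsigned int max_cq;
		void *mem_rsrc;
		struct demo_qp **qp_table;
		struct demo_cq **cq_table;
	};

	static int demo_init_rsrc(struct demo_rf *rf)
	{
		/* One allocation sized for both tables, as irdma_calc_mem_rsrc_size() does. */
		size_t size = sizeof(struct demo_qp *) * rf->max_qp +
			      sizeof(struct demo_cq *) * rf->max_cq;

		rf->mem_rsrc = calloc(1, size);	/* stand-in for vzalloc() */
		if (!rf->mem_rsrc)
			return -1;

		/* Carve consecutive regions out of the single blob. */
		rf->qp_table = (struct demo_qp **)rf->mem_rsrc;
		rf->cq_table = (struct demo_cq **)&rf->qp_table[rf->max_qp];
		return 0;
	}

	int main(void)
	{
		struct demo_rf rf = { .max_qp = 1024, .max_cq = 2048 };

		if (demo_init_rsrc(&rf))
			return 1;
		printf("qp_table=%p cq_table=%p\n",
		       (void *)rf.qp_table, (void *)rf.cq_table);
		free(rf.mem_rsrc);
		return 0;
	}
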
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
index 37a40fb4d0d7..638d127fb3e0 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -254,5 +254,6 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_1;
dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = I40IW_MIN_WQ_SIZE;
dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
}
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h
index 1c438b3593ea..10afc165f5ea 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.h
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.h
@@ -140,11 +140,11 @@ enum i40iw_device_caps_const {
I40IW_MAX_CQ_SIZE = 1048575,
I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
+ I40IW_MIN_WQ_SIZE = 4 /* WQEs */,
};
#define I40IW_QP_WQE_MIN_SIZE 32
#define I40IW_QP_WQE_MAX_SIZE 128
-#define I40IW_QP_SW_MIN_WQSIZE 4
#define I40IW_MAX_RQ_WQE_SHIFT 2
#define I40IW_MAX_QUANTA_PER_WR 2
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
index 298d14905993..10ccf4bc3f2d 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.c
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -195,6 +195,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
IRDMA_FEATURE_CQ_RESIZE;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h
index b65c463abf0b..54035a08cc93 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.h
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.h
@@ -64,6 +64,7 @@ enum icrdma_device_caps_const {
ICRDMA_MAX_IRD_SIZE = 127,
ICRDMA_MAX_ORD_SIZE = 255,
+ ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
};
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
index 173e2dc2fc35..3237fa64bc8f 100644
--- a/drivers/infiniband/hw/irdma/irdma.h
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -119,6 +119,7 @@ struct irdma_uk_attrs {
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u16 max_hw_sq_chunk;
+ u16 min_hw_wq_size;
u8 hw_rev;
};
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 2323962cdeac..82fc5f5b002c 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -239,7 +239,7 @@ struct irdma_qv_info {
struct irdma_qvlist_info {
u32 num_vectors;
- struct irdma_qv_info qv_info[1];
+ struct irdma_qv_info qv_info[];
};
struct irdma_gen_ops {
@@ -309,7 +309,9 @@ struct irdma_pci_f {
spinlock_t arp_lock; /*protect ARP table access*/
spinlock_t rsrc_lock; /* protect HW resource array access */
spinlock_t qptable_lock; /*protect QP table access*/
+ spinlock_t cqtable_lock; /*protect CQ table access*/
struct irdma_qp **qp_table;
+ struct irdma_cq **cq_table;
spinlock_t qh_list_lock; /* protect mc_qht_list */
struct mc_table_list mc_qht_list;
struct irdma_msix_vector *iw_msixtbl;
@@ -500,6 +502,8 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata);
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
+void irdma_cq_add_ref(struct ib_cq *ibcq);
+void irdma_cq_rem_ref(struct ib_cq *ibcq);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
@@ -529,7 +533,7 @@ void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
u16 irdma_get_vlan_ipv4(u32 *addr);
-struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
+void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
int acc, u64 *iova_start);
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index a20709577ab0..c84ec4dd8536 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -971,6 +971,7 @@ struct irdma_allocate_stag_info {
bool remote_access:1;
bool use_hmc_fcn_index:1;
bool use_pf_rid:1;
+ bool all_memory:1;
u8 hmc_fcn_index;
};
@@ -998,6 +999,7 @@ struct irdma_reg_ns_stag_info {
bool use_hmc_fcn_index:1;
u8 hmc_fcn_index;
bool use_pf_rid:1;
+ bool all_memory:1;
};
struct irdma_fast_reg_stag_info {
@@ -1017,7 +1019,6 @@ struct irdma_fast_reg_stag_info {
bool local_fence:1;
bool read_fence:1;
bool signaled:1;
- bool push_wqe:1;
bool use_hmc_fcn_index:1;
u8 hmc_fcn_index;
bool use_pf_rid:1;
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 280d633d4ec4..d8285ca16293 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -127,10 +127,7 @@ void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
if (sw_sq_head != qp->initial_ring.head) {
- if (qp->push_dropped) {
- writel(qp->qp_id, qp->wqe_alloc_db);
- qp->push_dropped = false;
- } else if (sw_sq_head != hw_sq_tail) {
+ if (sw_sq_head != hw_sq_tail) {
if (sw_sq_head > qp->initial_ring.head) {
if (hw_sq_tail >= qp->initial_ring.head &&
hw_sq_tail < sw_sq_head)
@@ -147,38 +144,6 @@ void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
}
/**
- * irdma_qp_ring_push_db - ring qp doorbell
- * @qp: hw qp ptr
- * @wqe_idx: wqe index
- */
-static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
-{
- set_32bit_val(qp->push_db, 0,
- FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
- qp->initial_ring.head = qp->sq_ring.head;
- qp->push_mode = true;
- qp->push_dropped = false;
-}
-
-void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
- u32 wqe_idx, bool post_sq)
-{
- __le64 *push;
-
- if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
- IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
- !qp->push_mode) {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- } else {
- push = (__le64 *)((uintptr_t)qp->push_wqe +
- (wqe_idx & 0x7) * 0x20);
- memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
- irdma_qp_ring_push_db(qp, wqe_idx);
- }
-}
-
-/**
* irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
* @qp: hw qp ptr
* @wqe_idx: return wqe index
@@ -192,7 +157,6 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
{
__le64 *wqe;
__le64 *wqe_0 = NULL;
- u32 nop_wqe_idx;
u16 avail_quanta;
u16 i;
@@ -209,14 +173,10 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
return NULL;
- nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
for (i = 0; i < avail_quanta; i++) {
irdma_nop_1(qp);
IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
}
- if (qp->push_db && info->push_wqe)
- irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
- avail_quanta, nop_wqe_idx, true);
}
*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
@@ -282,8 +242,6 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
-
op_info = &info->op.rdma_write;
if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
return -EINVAL;
@@ -344,7 +302,6 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -353,12 +310,9 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
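
With push-mode WQEs gone, every SQ verb in this file now finishes the same way; a condensed view of the post path that remains:

	dma_wmb();                        /* WQE contents visible before the valid bit is set */
	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_uk_qp_post_wr(qp);  /* doorbell rung only via wqe_alloc_db */
	return 0;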
@@ -383,8 +337,6 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
u16 quanta;
u64 hdr;
- info->push_wqe = qp->push_db ? true : false;
-
op_info = &info->op.rdma_read;
if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
return -EINVAL;
@@ -431,7 +383,6 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
FIELD_PREP(IRDMAQPSQ_OPCODE,
(inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -440,12 +391,9 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -468,8 +416,6 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
-
op_info = &info->op.send;
if (qp->max_sq_frag_cnt < op_info->num_sges)
return -EINVAL;
@@ -530,7 +476,6 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -541,12 +486,9 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -720,7 +662,6 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
u32 i, total_size = 0;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.rdma_write;
if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
@@ -750,7 +691,6 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -767,12 +707,8 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -794,7 +730,6 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
u32 i, total_size = 0;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.send;
if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
@@ -827,7 +762,6 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
(info->imm_data_valid ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -845,12 +779,8 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -872,7 +802,6 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
bool local_fence = false;
struct ib_sge sge = {};
- info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.inv_local_stag;
local_fence = info->local_fence;
@@ -889,7 +818,6 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 16, 0);
hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -899,13 +827,8 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -1124,7 +1047,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
- info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
@@ -1213,11 +1135,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
return irdma_uk_cq_poll_cmpl(cq, info);
}
}
- /*cease posting push mode on push drop*/
- if (info->push_dropped) {
- qp->push_mode = false;
- qp->push_dropped = true;
- }
if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
if (!info->comp_status)
@@ -1349,10 +1266,12 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
u32 *sqdepth)
{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
- if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ if (*sqdepth < min_size)
+ *sqdepth = min_size;
else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
return -EINVAL;
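
The floor is now per-generation rather than the old IRDMA_QP_SW_MIN_WQSIZE constant; plugging in the values set earlier in this series (4 WQEs for GEN_1, 8 for GEN_2) with an illustrative shift of 2:

	/* GEN_1: min_hw_wq_size = 4  ->  floor = 4 << 2 = 16 quanta */
	/* GEN_2: min_hw_wq_size = 8  ->  floor = 8 << 2 = 32 quanta */
	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
	if (*sqdepth < min_size)
		*sqdepth = min_size;      /* small SQs are raised to the per-generation floor */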
@@ -1369,10 +1288,12 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
u32 *rqdepth)
{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ if (*rqdepth < min_size)
+ *rqdepth = min_size;
else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
@@ -1415,6 +1336,78 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
}
/**
+ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
+ * @ukinfo: qp initialization info
+ * @sq_shift: Returns shift of SQ
+ * @rq_shift: Returns shift of RQ
+ */
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+}
+
+/**
+ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
+ * @ukinfo: qp initialization info
+ * @sq_depth: Returns depth of SQ
+ * @sq_shift: Returns shift of SQ
+ */
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+ status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
+ *sq_shift, sq_depth);
+
+ return status;
+}
+
+/**
+ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
+ * @ukinfo: qp initialization info
+ * @rq_depth: Returns depth of RQ
+ * @rq_shift: Returns shift of RQ
+ */
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift)
+{
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+
+ status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
+ *rq_shift, rq_depth);
+
+ return status;
+}
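
A minimal sketch of how a caller strings the new helpers together; this is essentially what the kernel-mode QP setup in verbs.c below does:

	ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth, &ukinfo->sq_shift);
	if (ret)
		return ret;

	ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth, &ukinfo->rq_shift);
	if (ret)
		return ret;

	ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
	ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;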
+
+/**
* irdma_uk_qp_init - initialize shared qp
* @qp: hw qp (user and kernel)
* @info: qp initialization info
@@ -1428,23 +1421,12 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
int ret_code = 0;
u32 sq_ring_size;
- u8 sqshift, rqshift;
qp->uk_attrs = info->uk_attrs;
if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
return -EINVAL;
- irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
- if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
- info->max_inline_data, &sqshift);
- if (info->abi_ver > 4)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- } else {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
- info->max_inline_data, &sqshift);
- }
qp->qp_caps = info->qp_caps;
qp->sq_base = info->sq;
qp->rq_base = info->rq;
@@ -1456,9 +1438,8 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->wqe_alloc_db = info->wqe_alloc_db;
qp->qp_id = info->qp_id;
qp->sq_size = info->sq_size;
- qp->push_mode = false;
qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
- sq_ring_size = qp->sq_size << sqshift;
+ sq_ring_size = qp->sq_size << info->sq_shift;
IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
if (info->first_sq_wq) {
@@ -1473,9 +1454,9 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->rq_size = info->rq_size;
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
qp->max_inline_data = info->max_inline_data;
- qp->rq_wqe_size = rqshift;
+ qp->rq_wqe_size = info->rq_shift;
IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
- qp->rq_wqe_size_multiplier = 1 << rqshift;
+ qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
@@ -1554,7 +1535,6 @@ int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
u32 wqe_idx;
struct irdma_post_sq_info info = {};
- info.push_wqe = false;
info.wr_id = wr_id;
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
0, &info);
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index d0cdf609f5e0..36feca57b274 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -85,6 +85,7 @@ enum irdma_device_caps_const {
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
+ IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};
enum irdma_addressing_type {
@@ -215,7 +216,6 @@ struct irdma_post_sq_info {
bool local_fence:1;
bool inline_data:1;
bool imm_data_valid:1;
- bool push_wqe:1;
bool report_rtt:1;
bool udp_hdr:1;
bool defer_flag:1;
@@ -247,7 +247,6 @@ struct irdma_cq_poll_info {
u8 op_type;
u8 q_type;
bool stag_invalid_set:1; /* or L_R_Key set */
- bool push_dropped:1;
bool error:1;
bool solicited_event:1;
bool ipv4:1;
@@ -295,6 +294,12 @@ void irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift);
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift);
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift);
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
@@ -314,8 +319,6 @@ struct irdma_qp_uk {
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
__le64 *shadow_area;
- __le32 *push_db;
- __le64 *push_wqe;
struct irdma_ring sq_ring;
struct irdma_ring rq_ring;
struct irdma_ring initial_ring;
@@ -335,8 +338,6 @@ struct irdma_qp_uk {
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
bool deferred_flag:1;
- bool push_mode:1; /* whether the last post wqe was pushed */
- bool push_dropped:1;
bool first_sq_wq:1;
bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
@@ -374,8 +375,12 @@ struct irdma_qp_uk_init_info {
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
+ u32 sq_depth;
+ u32 rq_depth;
u8 first_sq_wq;
u8 type;
+ u8 sq_shift;
+ u8 rq_shift;
int abi_ver;
bool legacy_mode;
};
@@ -404,7 +409,5 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
u32 *wqdepth);
-void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
- u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index eb083f70b09f..6cd5cb85dafe 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -760,6 +760,31 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp)
complete(&iwqp->free_qp);
}
+void irdma_cq_add_ref(struct ib_cq *ibcq)
+{
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+
+ refcount_inc(&iwcq->refcnt);
+}
+
+void irdma_cq_rem_ref(struct ib_cq *ibcq)
+{
+ struct ib_device *ibdev = ibcq->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->rf->cqtable_lock, flags);
+ if (!refcount_dec_and_test(&iwcq->refcnt)) {
+ spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+ return;
+ }
+
+ iwdev->rf->cq_table[iwcq->cq_num] = NULL;
+ spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+ complete(&iwcq->free_cq);
+}
+
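The release side mirrors irdma_qp_rem_ref(): the table slot is cleared under cqtable_lock only on the final put, and the destroy path (see the verbs.c hunk further down) blocks until that happens:

	irdma_cq_rem_ref(ib_cq);              /* drop the reference taken at create */
	wait_for_completion(&iwcq->free_cq);  /* wait for any remaining CQ users    */
	irdma_cq_wq_destroy(iwdev->rf, cq);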
struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
{
return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9c4fe4fa9001..3eb7a7a3a975 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -277,7 +277,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
struct irdma_alloc_ucontext_req req = {};
struct irdma_alloc_ucontext_resp uresp = {};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
- struct irdma_uk_attrs *uk_attrs;
+ struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
@@ -292,7 +292,9 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
- uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
+ if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
+ ucontext->use_raw_attrs = true;
+
/* GEN_1 legacy support with libi40iw */
if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
@@ -327,6 +329,9 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
+ uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
@@ -567,6 +572,87 @@ static void irdma_setup_virt_qp(struct irdma_device *iwdev,
}
/**
+ * irdma_setup_umode_qp - setup sq and rq size in user mode qp
+ * @udata: udata
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ * @init_attr: Initial QP create attributes
+ */
+static int irdma_setup_umode_qp(struct ib_udata *udata,
+ struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+ struct irdma_create_qp_req req;
+ unsigned long flags;
+ int ret;
+
+ ret = ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen));
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n");
+ return ret;
+ }
+
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+ iwqp->user_mode = 1;
+ if (req.user_wqe_bufs) {
+ info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ if (!iwqp->iwpbl) {
+ ret = -ENODATA;
+ ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
+ return ret;
+ }
+ }
+
+ if (!ucontext->use_raw_attrs) {
+ /**
+ * Maintain backward compat with older ABI which passes sq and
+ * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
+ * There is no way to compute the correct value of
+ * iwqp->max_send_wr/max_recv_wr in the kernel.
+ */
+ iwqp->max_send_wr = init_attr->cap.max_send_wr;
+ iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
+ ukinfo->sq_size = init_attr->cap.max_send_wr;
+ ukinfo->rq_size = init_attr->cap.max_recv_wr;
+ irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
+ &ukinfo->rq_shift);
+ } else {
+ ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
+ if (ret)
+ return ret;
+
+ ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
+ if (ret)
+ return ret;
+
+ iwqp->max_send_wr =
+ (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr =
+ (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ }
+
+ irdma_setup_virt_qp(iwdev, iwqp, info);
+
+ return 0;
+}
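
For the raw-attribute path, the values reported back to the application follow directly from the computed depth and shift; with hypothetical numbers (sq_depth = 256 quanta, sq_shift = 2), purely for illustration:

	/* hypothetical inputs, not taken from the patch */
	iwqp->max_send_wr = (256 - IRDMA_SQ_RSVD) >> 2;  /* WRs usable by the application   */
	ukinfo->sq_size   = 256 >> 2;                    /* 64, reported in actual_sq_size  */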
+
+/**
* irdma_setup_kmode_qp - setup initialization for kernel mode qp
* @iwdev: iwarp device
* @iwqp: qp ptr (user or kernel)
@@ -579,40 +665,28 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
struct ib_qp_init_attr *init_attr)
{
struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
- u32 sqdepth, rqdepth;
- u8 sqshift, rqshift;
u32 size;
int status;
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
- struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
- irdma_get_wqe_shift(uk_attrs,
- uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
- ukinfo->max_sq_frag_cnt,
- ukinfo->max_inline_data, &sqshift);
- status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
- &sqdepth);
+ status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
if (status)
return status;
- if (uk_attrs->hw_rev == IRDMA_GEN_1)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- else
- irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
- &rqshift);
-
- status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
- &rqdepth);
+ status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
if (status)
return status;
iwqp->kqp.sq_wrid_mem =
- kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
+ kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
if (!iwqp->kqp.sq_wrid_mem)
return -ENOMEM;
iwqp->kqp.rq_wrid_mem =
- kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+ kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+
if (!iwqp->kqp.rq_wrid_mem) {
kfree(iwqp->kqp.sq_wrid_mem);
iwqp->kqp.sq_wrid_mem = NULL;
@@ -622,7 +696,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
- size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
+ size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
size += (IRDMA_SHADOW_AREA_SIZE << 3);
mem->size = ALIGN(size, 256);
@@ -638,16 +712,19 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
ukinfo->sq = mem->va;
info->sq_pa = mem->pa;
- ukinfo->rq = &ukinfo->sq[sqdepth];
- info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
- ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
- info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
- ukinfo->sq_size = sqdepth >> sqshift;
- ukinfo->rq_size = rqdepth >> rqshift;
+ ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
+ info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
+ info->shadow_area_pa =
+ info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
ukinfo->qp_id = iwqp->ibqp.qp_num;
- init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
- init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
+ iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ init_attr->cap.max_send_wr = iwqp->max_send_wr;
+ init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
return 0;
}
@@ -803,18 +880,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp = to_iwqp(ibqp);
- struct irdma_create_qp_req req = {};
struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0;
int err_code;
- int sq_size;
- int rq_size;
struct irdma_sc_qp *qp;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {};
struct irdma_qp_host_ctx_info *ctx_info;
- unsigned long flags;
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
@@ -824,13 +897,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
return -EINVAL;
- sq_size = init_attr->cap.max_send_wr;
- rq_size = init_attr->cap.max_recv_wr;
-
init_info.vsi = &iwdev->vsi;
init_info.qp_uk_init_info.uk_attrs = uk_attrs;
- init_info.qp_uk_init_info.sq_size = sq_size;
- init_info.qp_uk_init_info.rq_size = rq_size;
+ init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
+ init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
@@ -880,36 +950,9 @@ static int irdma_create_qp(struct ib_qp *ibqp,
init_waitqueue_head(&iwqp->mod_qp_waitq);
if (udata) {
- err_code = ib_copy_from_udata(&req, udata,
- min(sizeof(req), udata->inlen));
- if (err_code) {
- ibdev_dbg(&iwdev->ibdev,
- "VERBS: ib_copy_from_data fail\n");
- goto error;
- }
-
- iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
- iwqp->user_mode = 1;
- if (req.user_wqe_bufs) {
- struct irdma_ucontext *ucontext =
- rdma_udata_to_drv_context(udata,
- struct irdma_ucontext,
- ibucontext);
-
- init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
- &ucontext->qp_reg_mem_list);
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-
- if (!iwqp->iwpbl) {
- err_code = -ENODATA;
- ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
- goto error;
- }
- }
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
- irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+ err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
+ init_attr);
} else {
INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
@@ -962,10 +1005,8 @@ static int irdma_create_qp(struct ib_qp *ibqp,
refcount_set(&iwqp->refcnt, 1);
spin_lock_init(&iwqp->lock);
spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
- iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
rf->qp_table[qp_num] = iwqp;
- iwqp->max_send_wr = sq_size;
- iwqp->max_recv_wr = rq_size;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (dev->ws_add(&iwdev->vsi, 0)) {
@@ -986,8 +1027,8 @@ static int irdma_create_qp(struct ib_qp *ibqp,
if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
uresp.lsmm = 1;
}
- uresp.actual_sq_size = sq_size;
- uresp.actual_rq_size = rq_size;
+ uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
+ uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
uresp.qp_id = qp_num;
uresp.qp_caps = qp->qp_uk.qp_caps;
@@ -1098,6 +1139,24 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
return 0;
}
+static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
+{
+ struct net_device *ndev;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev)
+ goto exit;
+ if (is_vlan_dev(ndev)) {
+ u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio);
+
+ prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ }
+exit:
+ rcu_read_unlock();
+ return prio;
+}
+
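Both callers follow the same two-step pattern: start from the ToS-derived priority, then let the VLAN egress QoS map override it (compare the irdma_setup_ah hunk near the end of this file):

	u8 prio = rt_tos2priority(ah_info->tc_tos);

	prio = irdma_roce_get_vlan_prio(sgid_attr, prio);
	ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT;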
/**
* irdma_modify_qp_roce - modify qp request
* @ibqp: qp's pointer for modify
@@ -1174,7 +1233,8 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_AV) {
struct irdma_av *av = &iwqp->roce_ah.av;
- const struct ib_gid_attr *sgid_attr;
+ const struct ib_gid_attr *sgid_attr =
+ attr->ah_attr.grh.sgid_attr;
u16 vlan_id = VLAN_N_VID;
u32 local_ip[4];
@@ -1189,17 +1249,22 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info->dest_qp);
irdma_qp_rem_qos(&iwqp->sc_qp);
dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
- ctx_info->user_pri = rt_tos2priority(udp_info->tos);
- iwqp->sc_qp.user_pri = ctx_info->user_pri;
- if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
- return -ENOMEM;
- irdma_qp_add_qos(&iwqp->sc_qp);
+ if (iwqp->sc_qp.vsi->dscp_mode)
+ ctx_info->user_pri =
+ iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
+ else
+ ctx_info->user_pri = rt_tos2priority(udp_info->tos);
}
- sgid_attr = attr->ah_attr.grh.sgid_attr;
ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
ctx_info->roce_info->mac_addr);
if (ret)
return ret;
+ ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
+ ctx_info->user_pri);
+ if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
+ return -ENOMEM;
+ iwqp->sc_qp.user_pri = ctx_info->user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
vlan_id = 0;
@@ -1781,6 +1846,9 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
irdma_process_resize_list(iwcq, iwdev, NULL);
spin_unlock_irqrestore(&iwcq->lock, flags);
+ irdma_cq_rem_ref(ib_cq);
+ wait_for_completion(&iwcq->free_cq);
+
irdma_cq_wq_destroy(iwdev->rf, cq);
spin_lock_irqsave(&iwceq->ce_lock, flags);
@@ -1990,6 +2058,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
cq = &iwcq->sc_cq;
cq->back_cq = iwcq;
+ refcount_set(&iwcq->refcnt, 1);
spin_lock_init(&iwcq->lock);
INIT_LIST_HEAD(&iwcq->resize_list);
INIT_LIST_HEAD(&iwcq->cmpl_generated);
@@ -2141,6 +2210,9 @@ static int irdma_create_cq(struct ib_cq *ibcq,
goto cq_destroy;
}
}
+ rf->cq_table[cq_num] = iwcq;
+ init_completion(&iwcq->free_cq);
+
return 0;
cq_destroy:
irdma_cq_wq_destroy(rf, cq);
@@ -2552,7 +2624,8 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
struct irdma_mr *iwmr)
{
struct irdma_allocate_stag_info *info;
- struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
int status;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -2568,6 +2641,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
info->pd_id = iwpd->sc_pd.pd_id;
info->total_len = iwmr->len;
+ info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
info->remote_access = true;
cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
cqp_info->post_sq = 1;
@@ -2615,6 +2689,8 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
iwmr->type = IRDMA_MEMREG_TYPE_MEM;
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
+ /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
+ iwmr->len = max_num_sg * PAGE_SIZE;
err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
false);
if (err_code)
@@ -2694,7 +2770,8 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
{
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_reg_ns_stag_info *stag_info;
- struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -2713,6 +2790,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
stag_info->total_len = iwmr->len;
stag_info->access_rights = irdma_get_mr_access(access);
stag_info->pd_id = iwpd->sc_pd.pd_id;
+ stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
else
@@ -2794,8 +2872,8 @@ static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
enum irdma_memreg_type reg_type)
{
struct irdma_device *iwdev = to_iwdev(pd->device);
- struct irdma_pbl *iwpbl = NULL;
- struct irdma_mr *iwmr = NULL;
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
unsigned long pgsz_bitmap;
iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
@@ -3476,8 +3554,7 @@ static void irdma_process_cqe(struct ib_wc *entry,
set_ib_wc_op_sq(cq_poll_info, entry);
} else {
set_ib_wc_op_rq(cq_poll_info, entry,
- qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
- true : false);
+ qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
cq_poll_info->stag_invalid_set) {
entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
@@ -3963,7 +4040,7 @@ static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
irdma_copy_ip_ntohl(ip_addr,
sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
- irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
+ irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL);
ipv4 = false;
ibdev_dbg(&iwdev->ibdev,
"VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
@@ -4261,9 +4338,12 @@ static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
ah_info->vlan_tag = 0;
if (ah_info->vlan_tag < VLAN_N_VID) {
+ u8 prio = rt_tos2priority(ah_info->tc_tos);
+
+ prio = irdma_roce_get_vlan_prio(sgid_attr, prio);
+
+ ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT;
ah_info->insert_vlan_tag = true;
- ah_info->vlan_tag |=
- rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
}
return 0;
@@ -4424,7 +4504,6 @@ static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
ah_attr->grh.sgid_index = ah->sgid_index;
- ah_attr->grh.sgid_index = ah->sgid_index;
memcpy(&ah_attr->grh.dgid, &ah->dgid,
sizeof(ah_attr->grh.dgid));
}
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index a536e9fa85eb..5d7b983f47a2 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -18,7 +18,8 @@ struct irdma_ucontext {
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
int abi_ver;
- bool legacy_mode;
+ u8 legacy_mode : 1;
+ u8 use_raw_attrs : 1;
};
struct irdma_pd {
@@ -122,6 +123,8 @@ struct irdma_cq {
u32 cq_mem_size;
struct irdma_dma_mem kmem;
struct irdma_dma_mem kmem_shadow;
+ struct completion free_cq;
+ refcount_t refcnt;
spinlock_t lock; /* for poll cq */
struct irdma_pbl *iwpbl;
struct irdma_pbl *iwpbl_shadow;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1f8d0d2c5f17..529db874d67c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -136,7 +136,7 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device,
continue;
if (mlx4_is_bonded(ibdev->dev)) {
- struct net_device *upper = NULL;
+ struct net_device *upper;
upper = netdev_master_upper_dev_get_rcu(dev);
if (upper) {
@@ -261,7 +261,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
int ret = 0;
int hw_update = 0;
int i;
- struct gid_entry *gids = NULL;
+ struct gid_entry *gids;
u16 vlan_id = 0xffff;
u8 mac[ETH_ALEN];
@@ -300,8 +300,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
ret = -ENOMEM;
} else {
*context = port_gid_table->gids[free].ctx;
- memcpy(&port_gid_table->gids[free].gid,
- &attr->gid, sizeof(attr->gid));
+ port_gid_table->gids[free].gid = attr->gid;
port_gid_table->gids[free].gid_type = attr->gid_type;
port_gid_table->gids[free].vlan_id = vlan_id;
port_gid_table->gids[free].ctx->real_index = free;
@@ -352,7 +351,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
struct mlx4_port_gid_table *port_gid_table;
int ret = 0;
int hw_update = 0;
- struct gid_entry *gids = NULL;
+ struct gid_entry *gids;
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -438,8 +437,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_udata *uhw)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err;
int have_ib_ports;
struct mlx4_uverbs_ex_query_device cmd;
@@ -656,8 +655,8 @@ mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num)
static int ib_link_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int ext_active_speed;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
@@ -834,8 +833,8 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid, int netw_view)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int clear = 0;
@@ -899,8 +898,8 @@ static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
u64 *sl2vl_tbl)
{
union sl2vl_tbl_to_u64 sl2vl64;
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
int jj;
@@ -959,8 +958,8 @@ static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey, int netw_view)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
@@ -1975,8 +1974,8 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
static int init_node_data(struct mlx4_ib_dev *dev)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
@@ -2623,7 +2622,7 @@ static int mlx4_ib_probe(struct auxiliary_device *adev,
int num_req_counters;
int allocated;
u32 counter_index;
- struct counter_index *new_counter_index = NULL;
+ struct counter_index *new_counter_index;
pr_info_once("%s", mlx4_ib_version);
@@ -2946,7 +2945,7 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
{
int err;
size_t flow_size;
- struct ib_flow_attr *flow = NULL;
+ struct ib_flow_attr *flow;
struct ib_flow_spec_ib *ib_spec;
if (is_attach) {
@@ -2966,11 +2965,11 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
MLX4_FS_REGULAR, &mqp->reg_id);
- } else {
- err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
+ kfree(flow);
+ return err;
}
- kfree(flow);
- return err;
+
+ return __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
}
static void mlx4_ib_remove(struct auxiliary_device *adev)
@@ -3019,7 +3018,7 @@ static void mlx4_ib_remove(struct auxiliary_device *adev)
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
- struct mlx4_ib_demux_work **dm = NULL;
+ struct mlx4_ib_demux_work **dm;
struct mlx4_dev *dev = ibdev->dev;
int i;
unsigned long flags;
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 24ee79aa2122..88f534cf690e 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
- char buff[11];
+ char buff[12];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 93257fa5aae8..8300ce622835 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -27,6 +27,7 @@ static const struct mlx5_ib_counter basic_q_cnts[] = {
INIT_Q_COUNTER(rx_write_requests),
INIT_Q_COUNTER(rx_read_requests),
INIT_Q_COUNTER(rx_atomic_requests),
+ INIT_Q_COUNTER(rx_dct_connect),
INIT_Q_COUNTER(out_of_buffer),
};
@@ -46,6 +47,7 @@ static const struct mlx5_ib_counter vport_basic_q_cnts[] = {
INIT_VPORT_Q_COUNTER(rx_write_requests),
INIT_VPORT_Q_COUNTER(rx_read_requests),
INIT_VPORT_Q_COUNTER(rx_atomic_requests),
+ INIT_VPORT_Q_COUNTER(rx_dct_connect),
INIT_VPORT_Q_COUNTER(out_of_buffer),
};
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 1e419e080b53..520034acf73a 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -2470,8 +2470,8 @@ destroy_res:
mlx5_steering_anchor_destroy_res(ft_prio);
put_flow_table:
put_flow_table(dev, ft_prio, true);
- mutex_unlock(&dev->flow_db->lock);
free_obj:
+ mutex_unlock(&dev->flow_db->lock);
kfree(obj);
return err;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 9c8a7b206dcf..8102ef113b7e 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -308,8 +308,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
u16 packet_error;
@@ -338,8 +338,8 @@ out:
static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
struct ib_smp *out_mad)
{
- struct ib_smp *in_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *in_mad;
+ int err;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
if (!in_mad)
@@ -358,8 +358,8 @@ static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *out_mad;
+ int err;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
@@ -380,8 +380,8 @@ out:
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
u16 *max_pkeys)
{
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *out_mad;
+ int err;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
@@ -402,8 +402,8 @@ out:
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
u32 *vendor_id)
{
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *out_mad;
+ int err;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
@@ -423,8 +423,8 @@ out:
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -448,8 +448,8 @@ out:
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -474,8 +474,8 @@ out:
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -503,8 +503,8 @@ out:
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -545,8 +545,8 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int ext_active_speed;
int err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index aed5cdea50e6..555629b798b9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2084,7 +2084,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
case MLX5_IB_MMAP_DEVICE_MEM:
return "Device Memory";
default:
- return NULL;
+ return "Unknown";
}
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 2017ede100a6..8a3762d9ff58 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -301,7 +301,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{
- set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
+ set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
+ ent->dev->umrc.pd);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
@@ -1024,19 +1025,26 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
if (!dev->cache.wq)
return;
- cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
mutex_lock(&dev->cache.rb_lock);
for (node = rb_first(root); node; node = rb_next(node)) {
ent = rb_entry(node, struct mlx5_cache_ent, node);
xa_lock_irq(&ent->mkeys);
ent->disabled = true;
xa_unlock_irq(&ent->mkeys);
- cancel_delayed_work_sync(&ent->dwork);
}
+ mutex_unlock(&dev->cache.rb_lock);
+
+ /*
+ * After all entries are disabled and can no longer requeue work on the WQ,
+ * flush it together with all pending async commands.
+ */
+ flush_workqueue(dev->cache.wq);
mlx5_mkey_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
+ /* At this point all entries are disabled and have no concurrent work. */
+ mutex_lock(&dev->cache.rb_lock);
node = rb_first(root);
while (node) {
ent = rb_entry(node, struct mlx5_cache_ent, node);
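
The fix is purely an ordering change; after this hunk the cleanup proceeds roughly as:

	/* 1. mark every cache entry disabled under rb_lock (no new work gets queued)   */
	/* 2. flush_workqueue() so already-queued delayed work drains completely        */
	/* 3. tear down debugfs and the async command context                           */
	/* 4. re-take rb_lock and free the now-idle entries                             */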
@@ -1235,7 +1243,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
}
/* The pg_access bit allows setting the access flags
- * in the page list submitted with the command. */
+ * in the page list submitted with the command.
+ */
MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -1766,6 +1775,11 @@ mlx5_alloc_priv_descs(struct ib_device *device,
int ret;
add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+ if (is_power_of_2(MLX5_UMR_ALIGN) && add_size) {
+ int end = max_t(int, MLX5_UMR_ALIGN, roundup_pow_of_two(size));
+
+ add_size = min_t(int, end - size, add_size);
+ }
mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
if (!mr->descs_alloc)
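
A worked example of the clamp, using hypothetical values (MLX5_UMR_ALIGN = 2048, ARCH_KMALLOC_MINALIGN = 64, size = 1536):

	add_size = max_t(int, 2048 - 64, 0);                    /* 1984 before the clamp */
	end      = max_t(int, 2048, roundup_pow_of_two(1536));  /* 2048                  */
	add_size = min_t(int, end - 1536, add_size);            /* clamped down to 512   */
	/* allocation shrinks from 1536 + 1984 to 1536 + 512 = 2048 bytes */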
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index c46df53f26cf..e1325f2927d6 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -53,8 +53,8 @@
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
struct mthca_dev *mdev = to_mdev(ibdev);
@@ -121,8 +121,8 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
static int mthca_query_port(struct ib_device *ibdev,
u32 port, struct ib_port_attr *props)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -217,8 +217,8 @@ out:
static int mthca_query_pkey(struct ib_device *ibdev,
u32 port, u16 index, u16 *pkey)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -246,8 +246,8 @@ static int mthca_query_pkey(struct ib_device *ibdev,
static int mthca_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -989,8 +989,8 @@ static const struct attribute_group mthca_attr_group = {
static int mthca_init_node_data(struct mthca_dev *dev)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 58f994341e9a..c849fdbd4c99 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1277,7 +1277,7 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
qp->sq.max_sges = attrs->cap.max_send_sge;
qp->rq.max_sges = attrs->cap.max_recv_sge;
qp->state = OCRDMA_QPS_RST;
- qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
}
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index 05307c1488b8..859f66a51bd2 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -354,7 +354,6 @@ int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
/* the GSI CQ is handled by the driver so remove it from the FW */
qedr_destroy_gsi_cq(dev, attrs);
dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
- dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index d745ce9dc88a..7887a6786ed4 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1358,7 +1358,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->prev_wqe_size = 0;
- qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
qp->dev = dev;
if (qedr_qp_has_sq(qp)) {
qedr_reset_qp_hwq_info(&qp->sq);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index ef85bc8d9384..152952127f13 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -2250,7 +2250,9 @@ static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
return qib_user_sdma_writev(rcd, pq, iter_iov(from), from->nr_segs);
}
-static struct class *qib_class;
+static const struct class qib_class = {
+ .name = "ipath",
+};
static dev_t qib_dev;
int qib_cdev_init(int minor, const char *name,
@@ -2281,7 +2283,7 @@ int qib_cdev_init(int minor, const char *name,
goto err_cdev;
}
- device = device_create(qib_class, NULL, dev, NULL, "%s", name);
+ device = device_create(&qib_class, NULL, dev, NULL, "%s", name);
if (!IS_ERR(device))
goto done;
ret = PTR_ERR(device);
@@ -2325,9 +2327,8 @@ int __init qib_dev_init(void)
goto done;
}
- qib_class = class_create("ipath");
- if (IS_ERR(qib_class)) {
- ret = PTR_ERR(qib_class);
+ ret = class_register(&qib_class);
+ if (ret) {
pr_err("Could not create device class (err %d)\n", -ret);
unregister_chrdev_region(qib_dev, QIB_NMINORS);
}
@@ -2338,10 +2339,8 @@ done:
void qib_dev_cleanup(void)
{
- if (qib_class) {
- class_destroy(qib_class);
- qib_class = NULL;
- }
+ if (class_is_registered(&qib_class))
+ class_unregister(&qib_class);
unregister_chrdev_region(qib_dev, QIB_NMINORS);
}
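
The qib hunk above converts a heap-allocated class from class_create() to a statically defined struct class; the rtrs client and server hunks later in this diff make the same conversion. A minimal, stand-alone sketch of the pattern (the class name and module hooks are illustrative, not taken from any of these drivers):

#include <linux/device.h>
#include <linux/module.h>

/* Illustrative class; the name is a placeholder, not a real driver's. */
static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	/*
	 * class_register() returns 0 or a negative errno directly, so the
	 * IS_ERR()/PTR_ERR() handling that class_create() required goes away.
	 */
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	/* Unregister only if init actually succeeded. */
	if (class_is_registered(&example_class))
		class_unregister(&example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
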
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 5111735aafae..d0bdc2d8adc8 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -597,6 +597,10 @@ static void flush_send_queue(struct rxe_qp *qp, bool notify)
struct rxe_queue *q = qp->sq.queue;
int err;
+ /* send queue never got created. nothing to do. */
+ if (!qp->sq.queue)
+ return;
+
while ((wqe = queue_head(q, q->type))) {
if (notify) {
err = flush_send_wqe(qp, wqe);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 666e06a82bc9..4d2a8ef52c85 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -136,12 +136,6 @@ static inline int qp_mtu(struct rxe_qp *qp)
return IB_MTU_4096;
}
-static inline int rcv_wqe_size(int max_sge)
-{
- return sizeof(struct rxe_recv_wqe) +
- max_sge * sizeof(struct ib_sge);
-}
-
void free_rd_atomic_resource(struct resp_res *res);
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index a569b111a9d2..28e379c108bc 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -183,13 +183,63 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
atomic_set(&qp->skb_out, 0);
}
+static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int wqe_size;
+ int err;
+
+ qp->sq.max_wr = init->cap.max_send_wr;
+ wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
+ init->cap.max_inline_data);
+ qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
+ qp->sq.max_inline = wqe_size;
+ wqe_size += sizeof(struct rxe_send_wqe);
+
+ qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
+ if (!qp->sq.queue) {
+ rxe_err_qp(qp, "Unable to allocate send queue");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* prepare info for caller to mmap send queue if user space qp */
+ err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
+ qp->sq.queue->buf, qp->sq.queue->buf_size,
+ &qp->sq.queue->ip);
+ if (err) {
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ goto err_free;
+ }
+
+ /* return actual capabilities to caller which may be larger
+ * than requested
+ */
+ init->cap.max_send_wr = qp->sq.max_wr;
+ init->cap.max_send_sge = qp->sq.max_sge;
+ init->cap.max_inline_data = qp->sq.max_inline;
+
+ return 0;
+
+err_free:
+ vfree(qp->sq.queue->buf);
+ kfree(qp->sq.queue);
+ qp->sq.queue = NULL;
+err_out:
+ return err;
+}
+
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_init_attr *init, struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
- int wqe_size;
- enum queue_type type;
+
+ /* if we don't finish qp create make sure queue is valid */
+ skb_queue_head_init(&qp->req_pkts);
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
@@ -204,32 +254,10 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
* (0xc000 - 0xffff).
*/
qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
- qp->sq.max_wr = init->cap.max_send_wr;
-
- /* These caps are limited by rxe_qp_chk_cap() done by the caller */
- wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
- init->cap.max_inline_data);
- qp->sq.max_sge = init->cap.max_send_sge =
- wqe_size / sizeof(struct ib_sge);
- qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
- wqe_size += sizeof(struct rxe_send_wqe);
- type = QUEUE_TYPE_FROM_CLIENT;
- qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
- wqe_size, type);
- if (!qp->sq.queue)
- return -ENOMEM;
-
- err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
- qp->sq.queue->buf, qp->sq.queue->buf_size,
- &qp->sq.queue->ip);
-
- if (err) {
- vfree(qp->sq.queue->buf);
- kfree(qp->sq.queue);
- qp->sq.queue = NULL;
+ err = rxe_init_sq(qp, init, udata, uresp);
+ if (err)
return err;
- }
qp->req.wqe_index = queue_get_producer(qp->sq.queue,
QUEUE_TYPE_FROM_CLIENT);
@@ -248,36 +276,65 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
return 0;
}
+static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int wqe_size;
+ int err;
+
+ qp->rq.max_wr = init->cap.max_recv_wr;
+ qp->rq.max_sge = init->cap.max_recv_sge;
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ qp->rq.max_sge * sizeof(struct ib_sge);
+
+ qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
+ if (!qp->rq.queue) {
+ rxe_err_qp(qp, "Unable to allocate recv queue");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* prepare info for caller to mmap recv queue if user space qp */
+ err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
+ qp->rq.queue->buf, qp->rq.queue->buf_size,
+ &qp->rq.queue->ip);
+ if (err) {
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ goto err_free;
+ }
+
+ /* return actual capabilities to caller which may be larger
+ * than requested
+ */
+ init->cap.max_recv_wr = qp->rq.max_wr;
+
+ return 0;
+
+err_free:
+ vfree(qp->rq.queue->buf);
+ kfree(qp->rq.queue);
+ qp->rq.queue = NULL;
+err_out:
+ return err;
+}
+
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_init_attr *init,
struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
- int wqe_size;
- enum queue_type type;
+
+ /* if we don't finish qp create make sure queue is valid */
+ skb_queue_head_init(&qp->resp_pkts);
if (!qp->srq) {
- qp->rq.max_wr = init->cap.max_recv_wr;
- qp->rq.max_sge = init->cap.max_recv_sge;
-
- wqe_size = rcv_wqe_size(qp->rq.max_sge);
-
- type = QUEUE_TYPE_FROM_CLIENT;
- qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
- wqe_size, type);
- if (!qp->rq.queue)
- return -ENOMEM;
-
- err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
- qp->rq.queue->buf, qp->rq.queue->buf_size,
- &qp->rq.queue->ip);
- if (err) {
- vfree(qp->rq.queue->buf);
- kfree(qp->rq.queue);
- qp->rq.queue = NULL;
+ err = rxe_init_rq(qp, init, udata, uresp);
+ if (err)
return err;
- }
}
rxe_init_task(&qp->resp.task, qp, rxe_responder);
@@ -307,10 +364,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
if (srq)
rxe_get(srq);
- qp->pd = pd;
- qp->rcq = rcq;
- qp->scq = scq;
- qp->srq = srq;
+ qp->pd = pd;
+ qp->rcq = rcq;
+ qp->scq = scq;
+ qp->srq = srq;
atomic_inc(&rcq->num_wq);
atomic_inc(&scq->num_wq);
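
The new rxe_init_sq()/rxe_init_rq() helpers above keep the original sizing arithmetic: the variable part of a send WQE must fit either the SGE list or the inline data, whichever is larger, and the actual caps are reported back to the caller. A small user-space sketch of that rule, with stand-in struct sizes rather than the real rxe layout:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel structures; sizes are hypothetical. */
struct ib_sge { unsigned long long addr; unsigned int length; unsigned int lkey; };
struct send_wqe_hdr { unsigned char hdr[64]; };	/* placeholder for struct rxe_send_wqe */

static size_t send_wqe_size(unsigned int max_send_sge, unsigned int max_inline,
			    unsigned int *out_max_sge, unsigned int *out_max_inline)
{
	/* variable part: SGE list or inline data, whichever is larger */
	size_t var = max_send_sge * sizeof(struct ib_sge);

	if (var < max_inline)
		var = max_inline;

	*out_max_sge = var / sizeof(struct ib_sge);	/* caps reported back to caller */
	*out_max_inline = var;
	return sizeof(struct send_wqe_hdr) + var;
}

int main(void)
{
	unsigned int sge, inl;
	size_t sz = send_wqe_size(4, 96, &sge, &inl);

	printf("wqe_size=%zu max_sge=%u max_inline=%u\n", sz, sge, inl);
	return 0;
}
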
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 2171f19494bc..d8c41fd626a9 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -578,10 +578,11 @@ static void save_state(struct rxe_send_wqe *wqe,
struct rxe_send_wqe *rollback_wqe,
u32 *rollback_psn)
{
- rollback_wqe->state = wqe->state;
+ rollback_wqe->state = wqe->state;
rollback_wqe->first_psn = wqe->first_psn;
- rollback_wqe->last_psn = wqe->last_psn;
- *rollback_psn = qp->req.psn;
+ rollback_wqe->last_psn = wqe->last_psn;
+ rollback_wqe->dma = wqe->dma;
+ *rollback_psn = qp->req.psn;
}
static void rollback_state(struct rxe_send_wqe *wqe,
@@ -589,10 +590,11 @@ static void rollback_state(struct rxe_send_wqe *wqe,
struct rxe_send_wqe *rollback_wqe,
u32 rollback_psn)
{
- wqe->state = rollback_wqe->state;
+ wqe->state = rollback_wqe->state;
wqe->first_psn = rollback_wqe->first_psn;
- wqe->last_psn = rollback_wqe->last_psn;
- qp->req.psn = rollback_psn;
+ wqe->last_psn = rollback_wqe->last_psn;
+ wqe->dma = rollback_wqe->dma;
+ qp->req.psn = rollback_psn;
}
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
@@ -797,6 +799,9 @@ int rxe_requester(struct rxe_qp *qp)
pkt.mask = rxe_opcode[opcode].mask;
pkt.wqe = wqe;
+ /* save wqe state before we build and send packet */
+ save_state(wqe, qp, &rollback_wqe, &rollback_psn);
+
av = rxe_get_av(&pkt, &ah);
if (unlikely(!av)) {
rxe_dbg_qp(qp, "Failed no address vector\n");
@@ -829,29 +834,29 @@ int rxe_requester(struct rxe_qp *qp)
if (ah)
rxe_put(ah);
- /*
- * To prevent a race on wqe access between requester and completer,
- * wqe members state and psn need to be set before calling
- * rxe_xmit_packet().
- * Otherwise, completer might initiate an unjustified retry flow.
- */
- save_state(wqe, qp, &rollback_wqe, &rollback_psn);
+ /* update wqe state as though we had sent it */
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
err = rxe_xmit_packet(qp, &pkt, skb);
if (err) {
- qp->need_req_skb = 1;
+ if (err != -EAGAIN) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+ }
+ /* the packet was dropped so reset wqe to the state
+ * before we sent it so we can try to resend
+ */
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
- if (err == -EAGAIN) {
- rxe_sched_task(&qp->req.task);
- goto exit;
- }
+ /* force a delay until the dropped packet is freed and
+ * the send queue is drained below the low water mark
+ */
+ qp->need_req_skb = 1;
- wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err;
+ rxe_sched_task(&qp->req.task);
+ goto exit;
}
update_state(qp, &pkt);
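
The rxe_req.c hunk moves save_state() ahead of packet build and adds wqe->dma to the snapshot, so a packet dropped with -EAGAIN can be rolled back and resent cleanly while any other transmit error remains fatal for the WQE. A compact sketch of that save/rollback-on-EAGAIN flow, using a hypothetical state struct and transmit callback rather than the real rxe types:

#include <errno.h>

struct wqe_state {
	int state;
	unsigned int first_psn;
	unsigned int last_psn;
	unsigned int dma_cur;	/* stands in for wqe->dma */
};

static int send_one(struct wqe_state *wqe, int (*xmit)(void))
{
	struct wqe_state saved = *wqe;	/* save_state() before mutating */
	int err;

	wqe->dma_cur++;			/* advance as though the send succeeded */
	wqe->last_psn++;

	err = xmit();
	if (err == -EAGAIN) {
		*wqe = saved;		/* rollback_state(): packet dropped, retry later */
		return -EAGAIN;
	}
	return err;			/* any other error is fatal for this WQE */
}

static int fake_xmit_drop(void) { return -EAGAIN; }

int main(void)
{
	struct wqe_state wqe = { 0 };

	return send_one(&wqe, fake_xmit_drop) == -EAGAIN ? 0 : 1;
}
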
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 64c64f5f36a8..da470a925efc 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1469,6 +1469,10 @@ static void flush_recv_queue(struct rxe_qp *qp, bool notify)
return;
}
+ /* recv queue not created. nothing to do. */
+ if (!qp->rq.queue)
+ return;
+
while ((wqe = queue_head(q, q->type))) {
if (notify) {
err = flush_recv_wqe(qp, wqe);
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 27ca82ec0826..3661cb627d28 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -45,40 +45,41 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_init_attr *init, struct ib_udata *udata,
struct rxe_create_srq_resp __user *uresp)
{
- int err;
- int srq_wqe_size;
struct rxe_queue *q;
- enum queue_type type;
+ int wqe_size;
+ int err;
- srq->ibsrq.event_handler = init->event_handler;
- srq->ibsrq.srq_context = init->srq_context;
- srq->limit = init->attr.srq_limit;
- srq->srq_num = srq->elem.index;
- srq->rq.max_wr = init->attr.max_wr;
- srq->rq.max_sge = init->attr.max_sge;
+ srq->ibsrq.event_handler = init->event_handler;
+ srq->ibsrq.srq_context = init->srq_context;
+ srq->limit = init->attr.srq_limit;
+ srq->srq_num = srq->elem.index;
+ srq->rq.max_wr = init->attr.max_wr;
+ srq->rq.max_sge = init->attr.max_sge;
- srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ srq->rq.max_sge * sizeof(struct ib_sge);
spin_lock_init(&srq->rq.producer_lock);
spin_lock_init(&srq->rq.consumer_lock);
- type = QUEUE_TYPE_FROM_CLIENT;
- q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
+ q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
if (!q) {
rxe_dbg_srq(srq, "Unable to allocate queue\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out;
}
- srq->rq.queue = q;
-
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
q->buf_size, &q->ip);
if (err) {
- vfree(q->buf);
- kfree(q);
- return err;
+ rxe_dbg_srq(srq, "Unable to init mmap info for caller\n");
+ goto err_free;
}
+ srq->rq.queue = q;
+ init->attr.max_wr = srq->rq.max_wr;
+
if (uresp) {
if (copy_to_user(&uresp->srq_num, &srq->srq_num,
sizeof(uresp->srq_num))) {
@@ -88,6 +89,12 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
}
return 0;
+
+err_free:
+ vfree(q->buf);
+ kfree(q);
+err_out:
+ return err;
}
int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
@@ -145,9 +152,10 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
{
- int err;
struct rxe_queue *q = srq->rq.queue;
struct mminfo __user *mi = NULL;
+ int wqe_size;
+ int err;
if (mask & IB_SRQ_MAX_WR) {
/*
@@ -156,12 +164,16 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
*/
mi = u64_to_user_ptr(ucmd->mmap_info_addr);
- err = rxe_queue_resize(q, &attr->max_wr,
- rcv_wqe_size(srq->rq.max_sge), udata, mi,
- &srq->rq.producer_lock,
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ srq->rq.max_sge * sizeof(struct ib_sge);
+
+ err = rxe_queue_resize(q, &attr->max_wr, wqe_size,
+ udata, mi, &srq->rq.producer_lock,
&srq->rq.consumer_lock);
if (err)
- goto err2;
+ goto err_free;
+
+ srq->rq.max_wr = attr->max_wr;
}
if (mask & IB_SRQ_LIMIT)
@@ -169,7 +181,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
return 0;
-err2:
+err_free:
rxe_queue_cleanup(q);
srq->rq.queue = NULL;
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 903f0b71447e..48f86839d36a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -798,7 +798,6 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
rxe_err_qp(qp, "unsupported wr opcode %d",
wr->opcode);
return -EINVAL;
- break;
}
}
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 2f3a9cda3850..58dddb143b9f 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -74,6 +74,7 @@ struct siw_device {
u32 vendor_part_id;
int numa_node;
+ char raw_gid[ETH_ALEN];
/* physical port state (only one port per device) */
enum ib_port_state state;
@@ -530,11 +531,12 @@ void siw_qp_llp_data_ready(struct sock *sk);
void siw_qp_llp_write_space(struct sock *sk);
/* QP TX path functions */
+int siw_create_tx_threads(void);
+void siw_stop_tx_threads(void);
int siw_run_sq(void *arg);
int siw_qp_sq_process(struct siw_qp *qp);
int siw_sq_start(struct siw_qp *qp);
int siw_activate_tx(struct siw_qp *qp);
-void siw_stop_tx_thread(int nr_cpu);
int siw_get_tx_cpu(struct siw_device *sdev);
void siw_put_tx_cpu(int cpu);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index da530c0404da..43e776073f49 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -976,6 +976,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_put(cep);
new_cep->listen_cep = NULL;
if (rv) {
+ siw_cancel_mpatimer(new_cep);
siw_cep_set_free(new_cep);
goto error;
}
@@ -1100,9 +1101,12 @@ static void siw_cm_work_handler(struct work_struct *w)
/*
* Socket close before MPA request received.
*/
- siw_dbg_cep(cep, "no mpareq: drop listener\n");
- siw_cep_put(cep->listen_cep);
- cep->listen_cep = NULL;
+ if (cep->listen_cep) {
+ siw_dbg_cep(cep,
+ "no mpareq: drop listener\n");
+ siw_cep_put(cep->listen_cep);
+ cep->listen_cep = NULL;
+ }
}
}
release_cep = 1;
@@ -1227,7 +1231,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
if (!cep)
goto out;
- siw_dbg_cep(cep, "state: %d\n", cep->state);
+ siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
+ cep->state, sk->sk_state);
+
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out;
switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE:
@@ -1501,7 +1509,6 @@ error:
cep->cm_id = NULL;
id->rem_ref(id);
- siw_cep_put(cep);
qp->cep = NULL;
siw_cep_put(cep);
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 65b5cda5457b..d4b6e0106851 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -75,8 +75,7 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
return rv;
}
- siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
-
+ siw_dbg(base_dev, "HWaddr=%pM\n", sdev->raw_gid);
return 0;
}
@@ -88,29 +87,6 @@ static void siw_device_cleanup(struct ib_device *base_dev)
xa_destroy(&sdev->mem_xa);
}
-static int siw_create_tx_threads(void)
-{
- int cpu, assigned = 0;
-
- for_each_online_cpu(cpu) {
- /* Skip HT cores */
- if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))
- continue;
-
- siw_tx_thread[cpu] =
- kthread_run_on_cpu(siw_run_sq,
- (unsigned long *)(long)cpu,
- cpu, "siw_tx/%u");
- if (IS_ERR(siw_tx_thread[cpu])) {
- siw_tx_thread[cpu] = NULL;
- continue;
- }
-
- assigned++;
- }
- return assigned;
-}
-
static int siw_dev_qualified(struct net_device *netdev)
{
/*
@@ -313,24 +289,19 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
return NULL;
base_dev = &sdev->base_dev;
-
sdev->netdev = netdev;
- if (netdev->type != ARPHRD_LOOPBACK && netdev->type != ARPHRD_NONE) {
- addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
- netdev->dev_addr);
+ if (netdev->addr_len) {
+ memcpy(sdev->raw_gid, netdev->dev_addr,
+ min_t(unsigned int, netdev->addr_len, ETH_ALEN));
} else {
/*
- * This device does not have a HW address,
- * but connection mangagement lib expects gid != 0
+ * This device does not have a HW address, but
+ * connection management requires a unique gid.
*/
- size_t len = min_t(size_t, strlen(base_dev->name), 6);
- char addr[6] = { };
-
- memcpy(addr, base_dev->name, len);
- addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
- addr);
+ eth_random_addr(sdev->raw_gid);
}
+ addrconf_addr_eui48((u8 *)&base_dev->node_guid, sdev->raw_gid);
base_dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
@@ -535,7 +506,6 @@ static struct rdma_link_ops siw_link_ops = {
static __init int siw_init_module(void)
{
int rv;
- int nr_cpu;
if (SENDPAGE_THRESH < SIW_MAX_INLINE) {
pr_info("siw: sendpage threshold too small: %u\n",
@@ -580,12 +550,8 @@ static __init int siw_init_module(void)
return 0;
out_error:
- for (nr_cpu = 0; nr_cpu < nr_cpu_ids; nr_cpu++) {
- if (siw_tx_thread[nr_cpu]) {
- siw_stop_tx_thread(nr_cpu);
- siw_tx_thread[nr_cpu] = NULL;
- }
- }
+ siw_stop_tx_threads();
+
if (siw_crypto_shash)
crypto_free_shash(siw_crypto_shash);
@@ -599,14 +565,8 @@ out_error:
static void __exit siw_exit_module(void)
{
- int cpu;
+ siw_stop_tx_threads();
- for_each_possible_cpu(cpu) {
- if (siw_tx_thread[cpu]) {
- siw_stop_tx_thread(cpu);
- siw_tx_thread[cpu] = NULL;
- }
- }
unregister_netdevice_notifier(&siw_netdev_nb);
rdma_link_unregister(&siw_link_ops);
ib_unregister_driver(RDMA_DRIVER_SIW);
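
The siw hunks replace direct uses of netdev->dev_addr with a per-device raw_gid that is either copied from the netdev or randomly generated, and derive the node GUID from it. A minimal sketch of that scheme, assuming illustrative struct and function names rather than siw's:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/addrconf.h>

struct example_dev {
	u8 raw_gid[ETH_ALEN];
	__be64 node_guid;
};

static void example_set_gid(struct example_dev *d, const u8 *mac)
{
	if (mac)
		memcpy(d->raw_gid, mac, ETH_ALEN);	/* netdev has a HW address */
	else
		eth_random_addr(d->raw_gid);		/* none: random but unique */

	/* derive the EUI-64 node GUID from the 6-byte raw GID */
	addrconf_addr_eui48((u8 *)&d->node_guid, d->raw_gid);
}
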
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 81e9bbd9ebda..47d0197db9a1 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -204,7 +204,7 @@ static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
{
if (irq_size) {
irq_size = roundup_pow_of_two(irq_size);
- qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
+ qp->irq = vcalloc(irq_size, sizeof(struct siw_sqe));
if (!qp->irq) {
qp->attrs.irq_size = 0;
return -ENOMEM;
@@ -212,7 +212,7 @@ static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
}
if (orq_size) {
orq_size = roundup_pow_of_two(orq_size);
- qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
+ qp->orq = vcalloc(orq_size, sizeof(struct siw_sqe));
if (!qp->orq) {
qp->attrs.orq_size = 0;
qp->attrs.irq_size = 0;
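
The siw_qp.c hunk swaps vzalloc(n * size) for vcalloc(n, size), which zeroes the allocation and fails cleanly if the multiplication would overflow. A minimal sketch, with a placeholder element type:

#include <linux/types.h>
#include <linux/vmalloc.h>

/* "struct item" is a placeholder element type for illustration only. */
struct item {
	u64 a;
	u64 b;
};

static struct item *alloc_items(size_t nr)
{
	/*
	 * vcalloc() zeroes the memory and returns NULL when nr * sizeof()
	 * would overflow, which an open-coded vzalloc(nr * size) cannot
	 * detect; free with vfree() as before.
	 */
	return vcalloc(nr, sizeof(struct item));
}
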
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 7c7a51d36d0c..60b6a4135961 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -1208,10 +1208,45 @@ struct tx_task_t {
static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);
-void siw_stop_tx_thread(int nr_cpu)
+int siw_create_tx_threads(void)
{
- kthread_stop(siw_tx_thread[nr_cpu]);
- wake_up(&per_cpu(siw_tx_task_g, nr_cpu).waiting);
+ int cpu, assigned = 0;
+
+ for_each_online_cpu(cpu) {
+ struct tx_task_t *tx_task;
+
+ /* Skip HT cores */
+ if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))
+ continue;
+
+ tx_task = &per_cpu(siw_tx_task_g, cpu);
+ init_llist_head(&tx_task->active);
+ init_waitqueue_head(&tx_task->waiting);
+
+ siw_tx_thread[cpu] =
+ kthread_run_on_cpu(siw_run_sq,
+ (unsigned long *)(long)cpu,
+ cpu, "siw_tx/%u");
+ if (IS_ERR(siw_tx_thread[cpu])) {
+ siw_tx_thread[cpu] = NULL;
+ continue;
+ }
+ assigned++;
+ }
+ return assigned;
+}
+
+void siw_stop_tx_threads(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (siw_tx_thread[cpu]) {
+ kthread_stop(siw_tx_thread[cpu]);
+ wake_up(&per_cpu(siw_tx_task_g, cpu).waiting);
+ siw_tx_thread[cpu] = NULL;
+ }
+ }
}
int siw_run_sq(void *data)
@@ -1221,9 +1256,6 @@ int siw_run_sq(void *data)
struct siw_qp *qp;
struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);
- init_llist_head(&tx_task->active);
- init_waitqueue_head(&tx_task->waiting);
-
while (1) {
struct llist_node *fifo_list = NULL;
@@ -1239,13 +1271,7 @@ int siw_run_sq(void *data)
* llist_del_all returns a list with newest entry first.
* Re-order list for fairness among QP's.
*/
- while (active) {
- struct llist_node *tmp = active;
-
- active = llist_next(active);
- tmp->next = fifo_list;
- fifo_list = tmp;
- }
+ fifo_list = llist_reverse_order(active);
while (fifo_list) {
qp = container_of(fifo_list, struct siw_qp, tx_list);
fifo_list = llist_next(fifo_list);
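
The siw_qp_tx.c hunk replaces the hand-rolled list reversal with llist_reverse_order(): llist_del_all() returns entries newest first, and reversing restores FIFO order among queue pairs. A minimal consumer-side sketch with a hypothetical handler callback:

#include <linux/llist.h>

static void drain_fifo(struct llist_head *head,
		       void (*handle)(struct llist_node *node))
{
	/*
	 * llist_del_all() detaches the whole list newest-first;
	 * llist_reverse_order() turns it back into FIFO order in one call.
	 */
	struct llist_node *node = llist_reverse_order(llist_del_all(head));

	while (node) {
		struct llist_node *next = llist_next(node);

		handle(node);
		node = next;
	}
}
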
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 398ec13db624..fdbef3254e30 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -157,7 +157,7 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
attr->vendor_part_id = sdev->vendor_part_id;
addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
- sdev->netdev->dev_addr);
+ sdev->raw_gid);
return 0;
}
@@ -218,7 +218,7 @@ int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
/* subnet_prefix == interface_id == 0; */
memset(gid, 0, sizeof(*gid));
- memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);
+ memcpy(gid->raw, sdev->raw_gid, ETH_ALEN);
return 0;
}
@@ -381,7 +381,7 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (udata)
qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
else
- qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
+ qp->sendq = vcalloc(num_sqe, sizeof(struct siw_sqe));
if (qp->sendq == NULL) {
rv = -ENOMEM;
@@ -414,7 +414,7 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
qp->recvq =
vmalloc_user(num_rqe * sizeof(struct siw_rqe));
else
- qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
+ qp->recvq = vcalloc(num_rqe, sizeof(struct siw_rqe));
if (qp->recvq == NULL) {
rv = -ENOMEM;
@@ -1494,7 +1494,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
if (pbl->max_buf < num_sle) {
siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
- mem->pbl->max_buf, num_sle);
+ num_sle, pbl->max_buf);
return -ENOMEM;
}
for_each_sg(sl, slp, num_sle, i) {
@@ -1624,7 +1624,7 @@ int siw_create_srq(struct ib_srq *base_srq,
srq->recvq =
vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
else
- srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe));
+ srq->recvq = vcalloc(srq->num_rqe, sizeof(struct siw_rqe));
if (srq->recvq == NULL) {
rv = -ENOMEM;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 92e1e7587af8..00a7303c8cc6 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2570,6 +2570,8 @@ static void isert_wait_conn(struct iscsit_conn *conn)
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
+
+ queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsit_conn *conn)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index b32941dd67cb..b6ee801fd0ff 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -45,7 +45,9 @@ static struct rtrs_rdma_dev_pd dev_pd = {
};
static struct workqueue_struct *rtrs_wq;
-static struct class *rtrs_clt_dev_class;
+static const struct class rtrs_clt_dev_class = {
+ .name = "rtrs-client",
+};
static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
{
@@ -2698,7 +2700,7 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
return ERR_PTR(-ENOMEM);
}
- clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.class = &rtrs_clt_dev_class;
clt->dev.release = rtrs_clt_dev_release;
uuid_gen(&clt->paths_uuid);
INIT_LIST_HEAD_RCU(&clt->paths_list);
@@ -3151,16 +3153,17 @@ static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
static int __init rtrs_client_init(void)
{
- rtrs_rdma_dev_pd_init(0, &dev_pd);
+ int ret = 0;
- rtrs_clt_dev_class = class_create("rtrs-client");
- if (IS_ERR(rtrs_clt_dev_class)) {
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+ ret = class_register(&rtrs_clt_dev_class);
+ if (ret) {
pr_err("Failed to create rtrs-client dev class\n");
- return PTR_ERR(rtrs_clt_dev_class);
+ return ret;
}
rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
if (!rtrs_wq) {
- class_destroy(rtrs_clt_dev_class);
+ class_unregister(&rtrs_clt_dev_class);
return -ENOMEM;
}
@@ -3170,7 +3173,7 @@ static int __init rtrs_client_init(void)
static void __exit rtrs_client_exit(void)
{
destroy_workqueue(rtrs_wq);
- class_destroy(rtrs_clt_dev_class);
+ class_unregister(&rtrs_clt_dev_class);
rtrs_rdma_dev_pd_deinit(&dev_pd);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 5adba0f754b6..3f305e694fe8 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -164,7 +164,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_pat
*/
goto unlock;
}
- srv->dev.class = rtrs_dev_class;
+ srv->dev.class = &rtrs_dev_class;
err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname);
if (err)
goto unlock;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index c38901e2c8f4..75e56604e462 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -27,7 +27,9 @@ MODULE_LICENSE("GPL");
#define MAX_HDR_SIZE PAGE_SIZE
static struct rtrs_rdma_dev_pd dev_pd;
-struct class *rtrs_dev_class;
+const struct class rtrs_dev_class = {
+ .name = "rtrs-server",
+};
static struct rtrs_srv_ib_ctx ib_ctx;
static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
@@ -2253,11 +2255,10 @@ static int __init rtrs_server_init(void)
err);
return err;
}
- rtrs_dev_class = class_create("rtrs-server");
- if (IS_ERR(rtrs_dev_class)) {
- err = PTR_ERR(rtrs_dev_class);
+ err = class_register(&rtrs_dev_class);
+ if (err)
goto out_err;
- }
+
rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
if (!rtrs_wq) {
err = -ENOMEM;
@@ -2267,7 +2268,7 @@ static int __init rtrs_server_init(void)
return 0;
out_dev_class:
- class_destroy(rtrs_dev_class);
+ class_unregister(&rtrs_dev_class);
out_err:
return err;
}
@@ -2275,7 +2276,7 @@ out_err:
static void __exit rtrs_server_exit(void)
{
destroy_workqueue(rtrs_wq);
- class_destroy(rtrs_dev_class);
+ class_unregister(&rtrs_dev_class);
rtrs_rdma_dev_pd_deinit(&dev_pd);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 2f8a638e36fa..5e325b82ff33 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -129,7 +129,7 @@ struct rtrs_srv_ib_ctx {
int ib_dev_count;
};
-extern struct class *rtrs_dev_class;
+extern const struct class rtrs_dev_class;
void close_path(struct rtrs_srv_path *srv_path);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 0e513a7e5ac8..2916e77f589b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1979,12 +1979,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
- else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
- scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
- else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
- scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
srp_free_req(ch, req, scmnd,
be32_to_cpu(rsp->req_lim_delta));
@@ -2788,7 +2784,6 @@ static int srp_abort(struct scsi_cmnd *scmnd)
u32 tag;
u16 ch_idx;
struct srp_rdma_ch *ch;
- int ret;
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
@@ -2802,19 +2797,14 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host,
"Sending SRP abort for tag %#x\n", tag);
if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
- SRP_TSK_ABORT_TASK, NULL) == 0)
- ret = SUCCESS;
- else if (target->rport->state == SRP_RPORT_LOST)
- ret = FAST_IO_FAIL;
- else
- ret = FAILED;
- if (ret == SUCCESS) {
+ SRP_TSK_ABORT_TASK, NULL) == 0) {
srp_free_req(ch, req, scmnd, 0);
- scmnd->result = DID_ABORT << 16;
- scsi_done(scmnd);
+ return SUCCESS;
}
+ if (target->rport->state == SRP_RPORT_LOST)
+ return FAST_IO_FAIL;
- return ret;
+ return FAILED;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
diff --git a/drivers/input/gameport/Kconfig b/drivers/input/gameport/Kconfig
index 5a2c2fb3217d..fe73b26e647a 100644
--- a/drivers/input/gameport/Kconfig
+++ b/drivers/input/gameport/Kconfig
@@ -25,6 +25,7 @@ if GAMEPORT
config GAMEPORT_NS558
tristate "Classic ISA and PnP gameport support"
+ depends on ISA
help
Say Y here if you have an ISA or PnP gameport.
@@ -35,6 +36,7 @@ config GAMEPORT_NS558
config GAMEPORT_L4
tristate "PDPI Lightning 4 gamecard support"
+ depends on ISA
help
Say Y here if you have a PDPI Lightning 4 gamecard.
@@ -53,7 +55,7 @@ config GAMEPORT_EMU10K1
config GAMEPORT_FM801
tristate "ForteMedia FM801 gameport support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
Say Y here if you have ForteMedia FM801 PCI audio controller
(Abit AU10, Genius Sound Maker, HP Workstation zx2000,
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index a1443320b419..34f416a3ebcb 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -519,12 +519,32 @@ EXPORT_SYMBOL(gameport_set_phys);
static void gameport_default_trigger(struct gameport *gameport)
{
+#ifdef CONFIG_HAS_IOPORT
outb(0xff, gameport->io);
+#endif
}
static unsigned char gameport_default_read(struct gameport *gameport)
{
+#ifdef CONFIG_HAS_IOPORT
return inb(gameport->io);
+#else
+ return 0xff;
+#endif
+}
+
+static void gameport_setup_default_handlers(struct gameport *gameport)
+{
+ if ((!gameport->trigger || !gameport->read) &&
+ !IS_ENABLED(CONFIG_HAS_IOPORT))
+ dev_err(&gameport->dev,
+ "I/O port access is required for %s (%s) but is not available\n",
+ gameport->phys, gameport->name);
+
+ if (!gameport->trigger)
+ gameport->trigger = gameport_default_trigger;
+ if (!gameport->read)
+ gameport->read = gameport_default_read;
}
/*
@@ -545,11 +565,7 @@ static void gameport_init_port(struct gameport *gameport)
if (gameport->parent)
gameport->dev.parent = &gameport->parent->dev;
- if (!gameport->trigger)
- gameport->trigger = gameport_default_trigger;
- if (!gameport->read)
- gameport->read = gameport_default_read;
-
+ gameport_setup_default_handlers(gameport);
INIT_LIST_HEAD(&gameport->node);
spin_lock_init(&gameport->timer_lock);
timer_setup(&gameport->poll_timer, gameport_run_poll_handler, 0);
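
The gameport hunk compiles the default inb()/outb() accessors only when CONFIG_HAS_IOPORT is set and warns at runtime if a port that needs them registers without it. A minimal sketch of that guard pattern, with illustrative names:

#include <linux/io.h>
#include <linux/kernel.h>

static unsigned char example_read_port(unsigned long port)
{
#ifdef CONFIG_HAS_IOPORT
	return inb(port);
#else
	return 0xff;	/* no port I/O available in this configuration */
#endif
}

static bool example_port_io_available(void)
{
	/* IS_ENABLED() keeps the warning path in plain C instead of #ifdef. */
	return IS_ENABLED(CONFIG_HAS_IOPORT);
}
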
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cdb193317c3b..ede380551e55 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -264,6 +264,7 @@ static const struct xpad_device {
{ 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
@@ -365,6 +366,7 @@ static const struct xpad_device {
{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
+ { 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
@@ -499,6 +501,8 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
+ XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
{ }
};
@@ -1720,6 +1724,27 @@ static int xpad_start_input(struct usb_xpad *xpad)
return error;
}
}
+ if (xpad->xtype == XTYPE_XBOX360) {
+ /*
+ * Some third-party Xbox 360-style controllers
+ * require this message to finish initialization.
+ */
+ u8 dummy[20];
+
+ error = usb_control_msg_recv(xpad->udev, 0,
+ /* bRequest */ 0x01,
+ /* bmRequestType */
+ USB_TYPE_VENDOR | USB_DIR_IN |
+ USB_RECIP_INTERFACE,
+ /* wValue */ 0x100,
+ /* wIndex */ 0x00,
+ dummy, sizeof(dummy),
+ 25, GFP_KERNEL);
+ if (error)
+ dev_warn(&xpad->dev->dev,
+ "unable to receive magic message: %d\n",
+ error);
+ }
return 0;
}
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 896a5a989ddc..61e8e43e9c2b 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -713,17 +713,11 @@ static int adp5588_fw_parse(struct adp5588_kpad *kpad)
return 0;
}
-static void adp5588_disable_regulator(void *reg)
-{
- regulator_disable(reg);
-}
-
static int adp5588_probe(struct i2c_client *client)
{
struct adp5588_kpad *kpad;
struct input_dev *input;
struct gpio_desc *gpio;
- struct regulator *vcc;
unsigned int revid;
int ret;
int error;
@@ -749,16 +743,7 @@ static int adp5588_probe(struct i2c_client *client)
if (error)
return error;
- vcc = devm_regulator_get(&client->dev, "vcc");
- if (IS_ERR(vcc))
- return PTR_ERR(vcc);
-
- error = regulator_enable(vcc);
- if (error)
- return error;
-
- error = devm_add_action_or_reset(&client->dev,
- adp5588_disable_regulator, vcc);
+ error = devm_regulator_get_enable(&client->dev, "vcc");
if (error)
return error;
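
The adp5588 hunk (and the pinephone-keyboard hunk further down) collapse devm_regulator_get() + regulator_enable() + a custom devm action into devm_regulator_get_enable(). A minimal sketch, assuming a supply named "vcc":

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int example_power_on(struct device *dev)
{
	/*
	 * One call requests the supply, enables it, and registers an
	 * automatic disable when the driver detaches.
	 */
	return devm_regulator_get_enable(dev, "vcc");
}
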
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index a20a4e186639..e305c44cd0aa 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -196,7 +196,7 @@ static int __init amikbd_probe(struct platform_device *pdev)
struct input_dev *dev;
int i, err;
- dev = input_allocate_device();
+ dev = devm_input_allocate_device(&pdev->dev);
if (!dev) {
dev_err(&pdev->dev, "Not enough memory for input device\n");
return -ENOMEM;
@@ -208,7 +208,6 @@ static int __init amikbd_probe(struct platform_device *pdev)
dev->id.vendor = 0x0001;
dev->id.product = 0x0001;
dev->id.version = 0x0100;
- dev->dev.parent = &pdev->dev;
dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
@@ -218,35 +217,21 @@ static int __init amikbd_probe(struct platform_device *pdev)
amikbd_init_console_keymaps();
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
- err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
- dev);
+ err = devm_request_irq(&pdev->dev, IRQ_AMIGA_CIAA_SP, amikbd_interrupt,
+ 0, "amikbd", dev);
if (err)
- goto fail2;
+ return err;
err = input_register_device(dev);
if (err)
- goto fail3;
+ return err;
platform_set_drvdata(pdev, dev);
return 0;
-
- fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev);
- fail2: input_free_device(dev);
- return err;
-}
-
-static int __exit amikbd_remove(struct platform_device *pdev)
-{
- struct input_dev *dev = platform_get_drvdata(pdev);
-
- free_irq(IRQ_AMIGA_CIAA_SP, dev);
- input_unregister_device(dev);
- return 0;
}
static struct platform_driver amikbd_driver = {
- .remove = __exit_p(amikbd_remove),
.driver = {
.name = "amiga-keyboard",
},
diff --git a/drivers/input/keyboard/bcm-keypad.c b/drivers/input/keyboard/bcm-keypad.c
index 56a919ec23b5..f3c3746acd4c 100644
--- a/drivers/input/keyboard/bcm-keypad.c
+++ b/drivers/input/keyboard/bcm-keypad.c
@@ -307,7 +307,6 @@ static int bcm_kp_probe(struct platform_device *pdev)
{
struct bcm_kp *kp;
struct input_dev *input_dev;
- struct resource *res;
int error;
kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
@@ -353,29 +352,16 @@ static int bcm_kp_probe(struct platform_device *pdev)
return error;
}
- /* Get the KEYPAD base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Missing keypad base address resource\n");
- return -ENODEV;
- }
-
- kp->base = devm_ioremap_resource(&pdev->dev, res);
+ kp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kp->base))
return PTR_ERR(kp->base);
/* Enable clock */
- kp->clk = devm_clk_get(&pdev->dev, "peri_clk");
+ kp->clk = devm_clk_get_optional(&pdev->dev, "peri_clk");
if (IS_ERR(kp->clk)) {
- error = PTR_ERR(kp->clk);
- if (error != -ENOENT) {
- if (error != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get clock\n");
- return error;
- }
- dev_dbg(&pdev->dev,
- "No clock specified. Assuming it's enabled\n");
- kp->clk = NULL;
+ return dev_err_probe(&pdev->dev, PTR_ERR(kp->clk), "Failed to get clock\n");
+ } else if (!kp->clk) {
+ dev_dbg(&pdev->dev, "No clock specified. Assuming it's enabled\n");
} else {
unsigned int desired_rate;
long actual_rate;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index c928829a8b0c..2e7c2c046e67 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -523,18 +523,15 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
NULL, GPIOD_IN, desc);
if (IS_ERR(bdata->gpiod)) {
error = PTR_ERR(bdata->gpiod);
- if (error == -ENOENT) {
- /*
- * GPIO is optional, we may be dealing with
- * purely interrupt-driven setup.
- */
- bdata->gpiod = NULL;
- } else {
- if (error != -EPROBE_DEFER)
- dev_err(dev, "failed to get gpio: %d\n",
- error);
- return error;
- }
+ if (error != -ENOENT)
+ return dev_err_probe(dev, error,
+ "failed to get gpio\n");
+
+ /*
+ * GPIO is optional, we may be dealing with
+ * purely interrupt-driven setup.
+ */
+ bdata->gpiod = NULL;
}
} else if (gpio_is_valid(button->gpio)) {
/*
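
The gpio_keys and gpio_keys_polled hunks switch to dev_err_probe(), which logs the failure (quietly for -EPROBE_DEFER), records the deferral reason, and returns the error, removing the "don't print on probe deferral" boilerplate. A minimal sketch with an illustrative, optional GPIO named "example":

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "example", GPIOD_IN);
	if (IS_ERR(gpiod))
		return dev_err_probe(dev, PTR_ERR(gpiod),
				     "failed to get gpio\n");

	*out = gpiod;	/* may be NULL when the GPIO is simply absent */
	return 0;
}
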
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index c3937d2fc744..ba00ecfbd343 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -299,13 +299,9 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
NULL, GPIOD_IN,
button->desc);
if (IS_ERR(bdata->gpiod)) {
- error = PTR_ERR(bdata->gpiod);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "failed to get gpio: %d\n",
- error);
fwnode_handle_put(child);
- return error;
+ return dev_err_probe(dev, PTR_ERR(bdata->gpiod),
+ "failed to get gpio\n");
}
} else if (gpio_is_valid(button->gpio)) {
/*
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 3964f6e0f6af..7bee93e9b0f5 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -556,6 +556,7 @@ static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev,
const char *name)
{
struct lm8323_pwm *pwm;
+ int err;
BUG_ON(id > 3);
@@ -575,9 +576,11 @@ static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev,
pwm->cdev.name = name;
pwm->cdev.brightness_set = lm8323_pwm_set_brightness;
pwm->cdev.groups = lm8323_pwm_groups;
- if (led_classdev_register(dev, &pwm->cdev) < 0) {
- dev_err(dev, "couldn't register PWM %d\n", id);
- return -1;
+
+ err = devm_led_classdev_register(dev, &pwm->cdev);
+ if (err) {
+ dev_err(dev, "couldn't register PWM %d: %d\n", id, err);
+ return err;
}
pwm->enabled = true;
}
@@ -585,8 +588,6 @@ static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev,
return 0;
}
-static struct i2c_driver lm8323_i2c_driver;
-
static ssize_t lm8323_show_disable(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -615,6 +616,12 @@ static ssize_t lm8323_set_disable(struct device *dev,
}
static DEVICE_ATTR(disable_kp, 0644, lm8323_show_disable, lm8323_set_disable);
+static struct attribute *lm8323_attrs[] = {
+ &dev_attr_disable_kp.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(lm8323);
+
static int lm8323_probe(struct i2c_client *client)
{
struct lm8323_platform_data *pdata = dev_get_platdata(&client->dev);
@@ -642,12 +649,13 @@ static int lm8323_probe(struct i2c_client *client)
return -EINVAL;
}
- lm = kzalloc(sizeof *lm, GFP_KERNEL);
- idev = input_allocate_device();
- if (!lm || !idev) {
- err = -ENOMEM;
- goto fail1;
- }
+ lm = devm_kzalloc(&client->dev, sizeof(*lm), GFP_KERNEL);
+ if (!lm)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_device(&client->dev);
+ if (!idev)
+ return -ENOMEM;
lm->client = client;
lm->idev = idev;
@@ -663,8 +671,10 @@ static int lm8323_probe(struct i2c_client *client)
lm8323_reset(lm);
- /* Nothing's set up to service the IRQ yet, so just spin for max.
- * 100ms until we can configure. */
+ /*
+ * Nothing's set up to service the IRQ yet, so just spin for max.
+ * 100ms until we can configure.
+ */
tmo = jiffies + msecs_to_jiffies(100);
while (lm8323_read(lm, LM8323_CMD_READ_INT, data, 1) == 1) {
if (data[0] & INT_NOINIT)
@@ -684,21 +694,17 @@ static int lm8323_probe(struct i2c_client *client)
/* If a true probe check the device */
if (lm8323_read_id(lm, data) != 0) {
dev_err(&client->dev, "device not found\n");
- err = -ENODEV;
- goto fail1;
+ return -ENODEV;
}
for (pwm = 0; pwm < LM8323_NUM_PWMS; pwm++) {
err = init_pwm(lm, pwm + 1, &client->dev,
pdata->pwm_names[pwm]);
- if (err < 0)
- goto fail2;
+ if (err)
+ return err;
}
lm->kp_enabled = true;
- err = device_create_file(&client->dev, &dev_attr_disable_kp);
- if (err < 0)
- goto fail2;
idev->name = pdata->name ? : "LM8323 keypad";
snprintf(lm->phys, sizeof(lm->phys),
@@ -719,14 +725,16 @@ static int lm8323_probe(struct i2c_client *client)
err = input_register_device(idev);
if (err) {
dev_dbg(&client->dev, "error registering input device\n");
- goto fail3;
+ return err;
}
- err = request_threaded_irq(client->irq, NULL, lm8323_irq,
- IRQF_TRIGGER_LOW|IRQF_ONESHOT, "lm8323", lm);
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, lm8323_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "lm8323", lm);
if (err) {
dev_err(&client->dev, "could not get IRQ %d\n", client->irq);
- goto fail4;
+ return err;
}
i2c_set_clientdata(client, lm);
@@ -735,39 +743,6 @@ static int lm8323_probe(struct i2c_client *client)
enable_irq_wake(client->irq);
return 0;
-
-fail4:
- input_unregister_device(idev);
- idev = NULL;
-fail3:
- device_remove_file(&client->dev, &dev_attr_disable_kp);
-fail2:
- while (--pwm >= 0)
- if (lm->pwm[pwm].enabled)
- led_classdev_unregister(&lm->pwm[pwm].cdev);
-fail1:
- input_free_device(idev);
- kfree(lm);
- return err;
-}
-
-static void lm8323_remove(struct i2c_client *client)
-{
- struct lm8323_chip *lm = i2c_get_clientdata(client);
- int i;
-
- disable_irq_wake(client->irq);
- free_irq(client->irq, lm);
-
- input_unregister_device(lm->idev);
-
- device_remove_file(&lm->client->dev, &dev_attr_disable_kp);
-
- for (i = 0; i < 3; i++)
- if (lm->pwm[i].enabled)
- led_classdev_unregister(&lm->pwm[i].cdev);
-
- kfree(lm);
}
/*
@@ -823,11 +798,11 @@ static const struct i2c_device_id lm8323_id[] = {
static struct i2c_driver lm8323_i2c_driver = {
.driver = {
- .name = "lm8323",
- .pm = pm_sleep_ptr(&lm8323_pm_ops),
+ .name = "lm8323",
+ .pm = pm_sleep_ptr(&lm8323_pm_ops),
+ .dev_groups = lm8323_groups,
},
.probe = lm8323_probe,
- .remove = lm8323_remove,
.id_table = lm8323_id,
};
MODULE_DEVICE_TABLE(i2c, lm8323_id);
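
The lm8323 hunk moves the disable_kp attribute from manual device_create_file()/device_remove_file() calls to the driver's .dev_groups, letting the core create and remove the sysfs file. A minimal sketch of that pattern with a placeholder attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 0);	/* placeholder value */
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);	/* generates example_groups for .dev_groups */

/* In the driver definition: .driver.dev_groups = example_groups */
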
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index c9f05764e36d..1c070c499c85 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -142,18 +142,18 @@ static int lm8333_probe(struct i2c_client *client)
return -EINVAL;
}
- lm8333 = kzalloc(sizeof(*lm8333), GFP_KERNEL);
- input = input_allocate_device();
- if (!lm8333 || !input) {
- err = -ENOMEM;
- goto free_mem;
- }
+ lm8333 = devm_kzalloc(&client->dev, sizeof(*lm8333), GFP_KERNEL);
+ if (!lm8333)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
lm8333->client = client;
lm8333->input = input;
input->name = client->name;
- input->dev.parent = &client->dev;
input->id.bustype = BUS_I2C;
input_set_capability(input, EV_MSC, MSC_SCAN);
@@ -162,7 +162,7 @@ static int lm8333_probe(struct i2c_client *client)
LM8333_NUM_ROWS, LM8333_NUM_COLS,
lm8333->keycodes, input);
if (err)
- goto free_mem;
+ return err;
if (pdata->debounce_time) {
err = lm8333_write8(lm8333, LM8333_DEBOUNCE,
@@ -178,34 +178,19 @@ static int lm8333_probe(struct i2c_client *client)
dev_warn(&client->dev, "Unable to set active time\n");
}
- err = request_threaded_irq(client->irq, NULL, lm8333_irq_thread,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "lm8333", lm8333);
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, lm8333_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "lm8333", lm8333);
if (err)
- goto free_mem;
+ return err;
err = input_register_device(input);
if (err)
- goto free_irq;
+ return err;
i2c_set_clientdata(client, lm8333);
return 0;
-
- free_irq:
- free_irq(client->irq, lm8333);
- free_mem:
- input_free_device(input);
- kfree(lm8333);
- return err;
-}
-
-static void lm8333_remove(struct i2c_client *client)
-{
- struct lm8333 *lm8333 = i2c_get_clientdata(client);
-
- free_irq(client->irq, lm8333);
- input_unregister_device(lm8333->input);
- kfree(lm8333);
}
static const struct i2c_device_id lm8333_id[] = {
@@ -219,7 +204,6 @@ static struct i2c_driver lm8333_driver = {
.name = "lm8333",
},
.probe = lm8333_probe,
- .remove = lm8333_remove,
.id_table = lm8333_id,
};
module_i2c_driver(lm8333_driver);
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 911e1181cd6f..322a87807159 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -160,17 +160,10 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
{
struct lpc32xx_kscan_drv *kscandat;
struct input_dev *input;
- struct resource *res;
size_t keymap_size;
int error;
int irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get platform I/O memory\n");
- return -EINVAL;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
@@ -221,7 +214,7 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
input_set_drvdata(kscandat->input, kscandat);
- kscandat->kscan_base = devm_ioremap_resource(&pdev->dev, res);
+ kscandat->kscan_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kscandat->kscan_base))
return PTR_ERR(kscandat->kscan_base);
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index de312d8eb974..2410f676c7f9 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -92,6 +92,13 @@ static irqreturn_t mcs_touchkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mcs_touchkey_poweroff(void *data)
+{
+ struct mcs_touchkey_data *touchkey = data;
+
+ touchkey->poweron(false);
+}
+
static int mcs_touchkey_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -109,13 +116,16 @@ static int mcs_touchkey_probe(struct i2c_client *client)
return -EINVAL;
}
- data = kzalloc(struct_size(data, keycodes, pdata->key_maxval + 1),
- GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!data || !input_dev) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ data = devm_kzalloc(&client->dev,
+ struct_size(data, keycodes, pdata->key_maxval + 1),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
}
data->client = client;
@@ -136,15 +146,13 @@ static int mcs_touchkey_probe(struct i2c_client *client)
fw_ver = i2c_smbus_read_byte_data(client, fw_reg);
if (fw_ver < 0) {
- error = fw_ver;
- dev_err(&client->dev, "i2c read error[%d]\n", error);
- goto err_free_mem;
+ dev_err(&client->dev, "i2c read error[%d]\n", fw_ver);
+ return fw_ver;
}
dev_info(&client->dev, "Firmware version: %d\n", fw_ver);
input_dev->name = "MELFAS MCS Touchkey";
input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY);
if (!pdata->no_autorepeat)
input_dev->evbit[0] |= BIT_MASK(EV_REP);
@@ -169,40 +177,28 @@ static int mcs_touchkey_probe(struct i2c_client *client)
if (pdata->poweron) {
data->poweron = pdata->poweron;
data->poweron(true);
+
+ error = devm_add_action_or_reset(&client->dev,
+ mcs_touchkey_poweroff, data);
+ if (error)
+ return error;
}
- error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->dev.driver->name, data);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, mcs_touchkey_interrupt,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_mem;
+ return error;
}
error = input_register_device(input_dev);
if (error)
- goto err_free_irq;
+ return error;
i2c_set_clientdata(client, data);
return 0;
-
-err_free_irq:
- free_irq(client->irq, data);
-err_free_mem:
- input_free_device(input_dev);
- kfree(data);
- return error;
-}
-
-static void mcs_touchkey_remove(struct i2c_client *client)
-{
- struct mcs_touchkey_data *data = i2c_get_clientdata(client);
-
- free_irq(client->irq, data);
- if (data->poweron)
- data->poweron(false);
- input_unregister_device(data->input_dev);
- kfree(data);
}
static void mcs_touchkey_shutdown(struct i2c_client *client)
@@ -259,7 +255,6 @@ static struct i2c_driver mcs_touchkey_driver = {
.pm = pm_sleep_ptr(&mcs_touchkey_pm_ops),
},
.probe = mcs_touchkey_probe,
- .remove = mcs_touchkey_remove,
.shutdown = mcs_touchkey_shutdown,
.id_table = mcs_touchkey_id,
};
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 970f2a671c2e..b3ccc97f61e1 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -221,13 +221,20 @@ static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void ske_keypad_board_exit(void *data)
+{
+ struct ske_keypad *keypad = data;
+
+ keypad->board->exit();
+}
+
static int __init ske_keypad_probe(struct platform_device *pdev)
{
const struct ske_keypad_platform_data *plat =
dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
struct ske_keypad *keypad;
struct input_dev *input;
- struct resource *res;
int irq;
int error;
@@ -238,20 +245,14 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -EINVAL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "missing platform resources\n");
- return -EINVAL;
- }
+ return irq;
- keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
- input = input_allocate_device();
+ keypad = devm_kzalloc(dev, sizeof(struct ske_keypad),
+ GFP_KERNEL);
+ input = devm_input_allocate_device(dev);
if (!keypad || !input) {
dev_err(&pdev->dev, "failed to allocate keypad memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ return -ENOMEM;
}
keypad->irq = irq;
@@ -259,31 +260,20 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
keypad->input = input;
spin_lock_init(&keypad->ske_keypad_lock);
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto err_free_mem;
- }
-
- keypad->reg_base = ioremap(res->start, resource_size(res));
- if (!keypad->reg_base) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -ENXIO;
- goto err_free_mem_region;
- }
+ keypad->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(keypad->reg_base))
+ return PTR_ERR(keypad->reg_base);
- keypad->pclk = clk_get(&pdev->dev, "apb_pclk");
+ keypad->pclk = devm_clk_get_enabled(dev, "apb_pclk");
if (IS_ERR(keypad->pclk)) {
dev_err(&pdev->dev, "failed to get pclk\n");
- error = PTR_ERR(keypad->pclk);
- goto err_iounmap;
+ return PTR_ERR(keypad->pclk);
}
- keypad->clk = clk_get(&pdev->dev, NULL);
+ keypad->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
- error = PTR_ERR(keypad->clk);
- goto err_pclk;
+ return PTR_ERR(keypad->clk);
}
input->id.bustype = BUS_HOST;
@@ -295,48 +285,43 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
keypad->keymap, input);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
- goto err_clk;
+ return error;
}
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- error = clk_prepare_enable(keypad->pclk);
- if (error) {
- dev_err(&pdev->dev, "Failed to prepare/enable pclk\n");
- goto err_clk;
- }
-
- error = clk_prepare_enable(keypad->clk);
- if (error) {
- dev_err(&pdev->dev, "Failed to prepare/enable clk\n");
- goto err_pclk_disable;
- }
-
-
/* go through board initialization helpers */
if (keypad->board->init)
keypad->board->init();
+ if (keypad->board->exit) {
+ error = devm_add_action_or_reset(dev, ske_keypad_board_exit,
+ keypad);
+ if (error)
+ return error;
+ }
+
error = ske_keypad_chip_init(keypad);
if (error) {
dev_err(&pdev->dev, "unable to init keypad hardware\n");
- goto err_clk_disable;
+ return error;
}
- error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
- IRQF_ONESHOT, "ske-keypad", keypad);
+ error = devm_request_threaded_irq(dev, keypad->irq,
+ NULL, ske_keypad_irq,
+ IRQF_ONESHOT, "ske-keypad", keypad);
if (error) {
dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
- goto err_clk_disable;
+ return error;
}
error = input_register_device(input);
if (error) {
dev_err(&pdev->dev,
- "unable to register input device: %d\n", error);
- goto err_free_irq;
+ "unable to register input device: %d\n", error);
+ return error;
}
if (plat->wakeup_enable)
@@ -345,47 +330,6 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, keypad);
return 0;
-
-err_free_irq:
- free_irq(keypad->irq, keypad);
-err_clk_disable:
- clk_disable_unprepare(keypad->clk);
-err_pclk_disable:
- clk_disable_unprepare(keypad->pclk);
-err_clk:
- clk_put(keypad->clk);
-err_pclk:
- clk_put(keypad->pclk);
-err_iounmap:
- iounmap(keypad->reg_base);
-err_free_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input);
- kfree(keypad);
- return error;
-}
-
-static int ske_keypad_remove(struct platform_device *pdev)
-{
- struct ske_keypad *keypad = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- free_irq(keypad->irq, keypad);
-
- input_unregister_device(keypad->input);
-
- clk_disable_unprepare(keypad->clk);
- clk_put(keypad->clk);
-
- if (keypad->board->exit)
- keypad->board->exit();
-
- iounmap(keypad->reg_base);
- release_mem_region(res->start, resource_size(res));
- kfree(keypad);
-
- return 0;
}
static int ske_keypad_suspend(struct device *dev)
@@ -424,7 +368,6 @@ static struct platform_driver ske_keypad_driver = {
.name = "nmk-ske-keypad",
.pm = pm_sleep_ptr(&ske_keypad_dev_pm_ops),
},
- .remove = ske_keypad_remove,
};
module_platform_driver_probe(ske_keypad_driver, ske_keypad_probe);
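
The nmk-ske-keypad conversion above is the template for most of the keyboard changes in this series: every manually unwound resource becomes device-managed, so the error ladder and the remove() callback can go. A minimal sketch of that shape, with hypothetical foo_* names and assuming one MMIO region, two clocks and an optional board exit hook:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_keypad {
	void __iomem *reg_base;
	struct clk *pclk;
	struct clk *clk;
	void (*board_exit)(void);	/* filled in from platform data */
};

static void foo_board_exit(void *data)
{
	struct foo_keypad *keypad = data;

	keypad->board_exit();
}

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_keypad *keypad;
	int error;

	keypad = devm_kzalloc(dev, sizeof(*keypad), GFP_KERNEL);
	if (!keypad)
		return -ENOMEM;

	/* Mapping is released automatically when the device is unbound. */
	keypad->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(keypad->reg_base))
		return PTR_ERR(keypad->reg_base);

	/* Get + prepare + enable in one call, undone on unbind. */
	keypad->pclk = devm_clk_get_enabled(dev, "apb_pclk");
	if (IS_ERR(keypad->pclk))
		return PTR_ERR(keypad->pclk);

	keypad->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(keypad->clk))
		return PTR_ERR(keypad->clk);

	/* Board-specific teardown becomes a devm action instead of remove(). */
	if (keypad->board_exit) {
		error = devm_add_action_or_reset(dev, foo_board_exit, keypad);
		if (error)
			return error;
	}

	return 0;
}
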
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index e9fa1423f136..096c18d7bca1 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -186,8 +186,7 @@ static int nspire_keypad_probe(struct platform_device *pdev)
return PTR_ERR(keypad->clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- keypad->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ keypad->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(keypad->reg_base))
return PTR_ERR(keypad->reg_base);
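
nspire-keypad switches to the helper variant that hands the struct resource back, presumably because res is still used later in probe; callers that only need the mapping can use the simpler helper instead. Two hedged wrappers with hypothetical names:

#include <linux/io.h>
#include <linux/platform_device.h>

/* Replaces platform_get_resource() + devm_ioremap_resource() in one call. */
static void __iomem *foo_map_and_get_res(struct platform_device *pdev,
					 struct resource **res)
{
	return devm_platform_get_and_ioremap_resource(pdev, 0, res);
}

/* When the struct resource itself is never used, this is shorter still. */
static void __iomem *foo_map_only(struct platform_device *pdev)
{
	return devm_platform_ioremap_resource(pdev, 0);
}
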
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 9f085d5679db..773e55eed88b 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -341,17 +341,10 @@ static int omap4_keypad_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct omap4_keypad *keypad_data;
struct input_dev *input_dev;
- struct resource *res;
unsigned int max_keys;
int irq;
int error;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no base address specified\n");
- return -EINVAL;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -370,7 +363,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
if (error)
return error;
- keypad_data->base = devm_ioremap_resource(dev, res);
+ keypad_data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(keypad_data->base))
return PTR_ERR(keypad_data->base);
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index b0ea387414c1..7ffe1a70c856 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -39,15 +39,8 @@ static int opencores_kbd_probe(struct platform_device *pdev)
{
struct input_dev *input;
struct opencores_kbd *opencores_kbd;
- struct resource *res;
int irq, i, error;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "missing board memory resource\n");
- return -EINVAL;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
@@ -65,7 +58,7 @@ static int opencores_kbd_probe(struct platform_device *pdev)
opencores_kbd->input = input;
- opencores_kbd->addr = devm_ioremap_resource(&pdev->dev, res);
+ opencores_kbd->addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(opencores_kbd->addr))
return PTR_ERR(opencores_kbd->addr);
diff --git a/drivers/input/keyboard/pinephone-keyboard.c b/drivers/input/keyboard/pinephone-keyboard.c
index 038ff3549a7a..147b1f288a33 100644
--- a/drivers/input/keyboard/pinephone-keyboard.c
+++ b/drivers/input/keyboard/pinephone-keyboard.c
@@ -318,40 +318,22 @@ static void ppkb_close(struct input_dev *input)
ppkb_set_scan(client, false);
}
-static void ppkb_regulator_disable(void *regulator)
-{
- regulator_disable(regulator);
-}
-
static int ppkb_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
unsigned int phys_rows, phys_cols;
struct pinephone_keyboard *ppkb;
- struct regulator *vbat_supply;
u8 info[PPKB_MATRIX_SIZE + 1];
struct device_node *i2c_bus;
int ret;
int error;
- vbat_supply = devm_regulator_get(dev, "vbat");
- error = PTR_ERR_OR_ZERO(vbat_supply);
+ error = devm_regulator_get_enable(dev, "vbat");
if (error) {
dev_err(dev, "Failed to get VBAT supply: %d\n", error);
return error;
}
- error = regulator_enable(vbat_supply);
- if (error) {
- dev_err(dev, "Failed to enable VBAT: %d\n", error);
- return error;
- }
-
- error = devm_add_action_or_reset(dev, ppkb_regulator_disable,
- vbat_supply);
- if (error)
- return error;
-
ret = i2c_smbus_read_i2c_block_data(client, 0, sizeof(info), info);
if (ret != sizeof(info)) {
error = ret < 0 ? ret : -EIO;
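
The pinephone-keyboard hunk folds regulator get, enable and the disable action into devm_regulator_get_enable(); no struct regulator pointer is kept because the driver only needs the supply held for its lifetime. A small sketch of the pattern (names are hypothetical):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int foo_enable_vbat(struct device *dev)
{
	int error;

	/*
	 * Gets and enables "vbat" in one step; the supply is disabled and
	 * released automatically on unbind, so no struct regulator pointer
	 * and no custom devm action are needed.
	 */
	error = devm_regulator_get_enable(dev, "vbat");
	if (error)
		dev_err(dev, "Failed to get VBAT supply: %d\n", error);

	return error;
}
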
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 871f858d0ba7..3724363d140e 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -717,7 +717,6 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct pxa27x_keypad *keypad;
struct input_dev *input_dev;
- struct resource *res;
int irq, error;
/* Driver need build keycode from device tree or pdata */
@@ -728,12 +727,6 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
if (irq < 0)
return -ENXIO;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
- return -ENXIO;
- }
-
keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad),
GFP_KERNEL);
if (!keypad)
@@ -747,7 +740,7 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
keypad->input_dev = input_dev;
keypad->irq = irq;
- keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ keypad->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(keypad->mmio_base))
return PTR_ERR(keypad->mmio_base);
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 91aaa9fc43a4..9b093b042bf1 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -149,20 +149,20 @@ static int qt1070_probe(struct i2c_client *client)
if (!qt1070_identify(client))
return -ENODEV;
- data = kzalloc(sizeof(struct qt1070_data), GFP_KERNEL);
- input = input_allocate_device();
- if (!data || !input) {
- dev_err(&client->dev, "insufficient memory\n");
- err = -ENOMEM;
- goto err_free_mem;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct qt1070_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
data->client = client;
data->input = input;
data->irq = client->irq;
input->name = "AT42QT1070 QTouch Sensor";
- input->dev.parent = &client->dev;
input->id.bustype = BUS_I2C;
/* Add the keycode */
@@ -185,19 +185,20 @@ static int qt1070_probe(struct i2c_client *client)
qt1070_write(client, RESET, 1);
msleep(QT1070_RESET_TIME);
- err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
- IRQF_TRIGGER_NONE | IRQF_ONESHOT,
- client->dev.driver->name, data);
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, qt1070_interrupt,
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (err) {
dev_err(&client->dev, "fail to request irq\n");
- goto err_free_mem;
+ return err;
}
/* Register the input device */
err = input_register_device(data->input);
if (err) {
dev_err(&client->dev, "Failed to register input device\n");
- goto err_free_irq;
+ return err;
}
i2c_set_clientdata(client, data);
@@ -206,24 +207,6 @@ static int qt1070_probe(struct i2c_client *client)
qt1070_read(client, DET_STATUS);
return 0;
-
-err_free_irq:
- free_irq(client->irq, data);
-err_free_mem:
- input_free_device(input);
- kfree(data);
- return err;
-}
-
-static void qt1070_remove(struct i2c_client *client)
-{
- struct qt1070_data *data = i2c_get_clientdata(client);
-
- /* Release IRQ */
- free_irq(client->irq, data);
-
- input_unregister_device(data->input);
- kfree(data);
}
static int qt1070_suspend(struct device *dev)
@@ -272,7 +255,6 @@ static struct i2c_driver qt1070_driver = {
},
.id_table = qt1070_id,
.probe = qt1070_probe,
- .remove = qt1070_remove,
};
module_i2c_driver(qt1070_driver);
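
qt1070 ends up with no remove() at all: memory, the input device and the IRQ are all devm-managed, and an input device allocated with devm_input_allocate_device() is unregistered automatically on unbind (it also gets its parent set, which is why the explicit input->dev.parent assignment is dropped). A condensed sketch of the resulting probe shape, using hypothetical foo_* names:

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>

static irqreturn_t foo_interrupt(int irq, void *data)
{
	/* ... read the chip and report keys on the input device ... */
	return IRQ_HANDLED;
}

static int foo_probe(struct i2c_client *client)
{
	struct input_dev *input;
	int error;

	input = devm_input_allocate_device(&client->dev);
	if (!input)
		return -ENOMEM;

	input->name = "Example QTouch Sensor";
	input->id.bustype = BUS_I2C;
	/* devm_input_allocate_device() already sets input->dev.parent. */

	error = devm_request_threaded_irq(&client->dev, client->irq,
					  NULL, foo_interrupt,
					  IRQF_ONESHOT, "foo-keys", input);
	if (error)
		return error;

	/* A devm-allocated input device is unregistered on driver unbind. */
	return input_register_device(input);
}
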
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 599ea85cfd30..7e3b09642ab7 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -32,7 +32,7 @@
#define QT2160_NUM_LEDS_X 8
-#define QT2160_CYCLE_INTERVAL (2*HZ)
+#define QT2160_CYCLE_INTERVAL 2000 /* msec - 2 sec */
static unsigned char qt2160_key2code[] = {
KEY_0, KEY_1, KEY_2, KEY_3,
@@ -54,7 +54,6 @@ struct qt2160_led {
struct qt2160_data {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work dwork;
unsigned short keycodes[ARRAY_SIZE(qt2160_key2code)];
u16 key_matrix;
#ifdef CONFIG_LEDS_CLASS
@@ -155,10 +154,10 @@ static int qt2160_read_block(struct i2c_client *client,
return 0;
}
-static int qt2160_get_key_matrix(struct qt2160_data *qt2160)
+static void qt2160_get_key_matrix(struct input_dev *input)
{
+ struct qt2160_data *qt2160 = input_get_drvdata(input);
struct i2c_client *client = qt2160->client;
- struct input_dev *input = qt2160->input;
u8 regs[6];
u16 old_matrix, new_matrix;
int ret, i, mask;
@@ -173,7 +172,7 @@ static int qt2160_get_key_matrix(struct qt2160_data *qt2160)
if (ret) {
dev_err(&client->dev,
"could not perform chip read.\n");
- return ret;
+ return;
}
old_matrix = qt2160->key_matrix;
@@ -191,37 +190,17 @@ static int qt2160_get_key_matrix(struct qt2160_data *qt2160)
}
input_sync(input);
-
- return 0;
}
-static irqreturn_t qt2160_irq(int irq, void *_qt2160)
+static irqreturn_t qt2160_irq(int irq, void *data)
{
- struct qt2160_data *qt2160 = _qt2160;
+ struct input_dev *input = data;
- mod_delayed_work(system_wq, &qt2160->dwork, 0);
+ qt2160_get_key_matrix(input);
return IRQ_HANDLED;
}
-static void qt2160_schedule_read(struct qt2160_data *qt2160)
-{
- schedule_delayed_work(&qt2160->dwork, QT2160_CYCLE_INTERVAL);
-}
-
-static void qt2160_worker(struct work_struct *work)
-{
- struct qt2160_data *qt2160 =
- container_of(work, struct qt2160_data, dwork.work);
-
- dev_dbg(&qt2160->client->dev, "worker\n");
-
- qt2160_get_key_matrix(qt2160);
-
- /* Avoid device lock up by checking every so often */
- qt2160_schedule_read(qt2160);
-}
-
static int qt2160_read(struct i2c_client *client, u8 reg)
{
int ret;
@@ -260,7 +239,7 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data)
static int qt2160_register_leds(struct qt2160_data *qt2160)
{
struct i2c_client *client = qt2160->client;
- int ret;
+ int error;
int i;
for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
@@ -273,9 +252,9 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
led->id = i;
led->qt2160 = qt2160;
- ret = led_classdev_register(&client->dev, &led->cdev);
- if (ret < 0)
- return ret;
+ error = devm_led_classdev_register(&client->dev, &led->cdev);
+ if (error)
+ return error;
}
/* Turn off LEDs */
@@ -286,14 +265,6 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
return 0;
}
-static void qt2160_unregister_leds(struct qt2160_data *qt2160)
-{
- int i;
-
- for (i = 0; i < QT2160_NUM_LEDS_X; i++)
- led_classdev_unregister(&qt2160->leds[i].cdev);
-}
-
#else
static inline int qt2160_register_leds(struct qt2160_data *qt2160)
@@ -301,10 +272,6 @@ static inline int qt2160_register_leds(struct qt2160_data *qt2160)
return 0;
}
-static inline void qt2160_unregister_leds(struct qt2160_data *qt2160)
-{
-}
-
#endif
static bool qt2160_identify(struct i2c_client *client)
@@ -345,12 +312,9 @@ static int qt2160_probe(struct i2c_client *client)
int i;
int error;
- /* Check functionality */
- error = i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE);
- if (!error) {
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
dev_err(&client->dev, "%s adapter not supported\n",
- dev_driver_string(&client->adapter->dev));
+ dev_driver_string(&client->adapter->dev));
return -ENODEV;
}
@@ -358,17 +322,16 @@ static int qt2160_probe(struct i2c_client *client)
return -ENODEV;
/* Chip is valid and active. Allocate structure */
- qt2160 = kzalloc(sizeof(struct qt2160_data), GFP_KERNEL);
- input = input_allocate_device();
- if (!qt2160 || !input) {
- dev_err(&client->dev, "insufficient memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ qt2160 = devm_kzalloc(&client->dev, sizeof(*qt2160), GFP_KERNEL);
+ if (!qt2160)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
qt2160->client = client;
qt2160->input = input;
- INIT_DELAYED_WORK(&qt2160->dwork, qt2160_worker);
input->name = "AT42QT2160 Touch Sense Keyboard";
input->id.bustype = BUS_I2C;
@@ -385,66 +348,48 @@ static int qt2160_probe(struct i2c_client *client)
}
__clear_bit(KEY_RESERVED, input->keybit);
+ input_set_drvdata(input, qt2160);
+
/* Calibrate device */
error = qt2160_write(client, QT2160_CMD_CALIBRATE, 1);
if (error) {
dev_err(&client->dev, "failed to calibrate device\n");
- goto err_free_mem;
+ return error;
}
if (client->irq) {
- error = request_irq(client->irq, qt2160_irq,
- IRQF_TRIGGER_FALLING, "qt2160", qt2160);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, qt2160_irq,
+ IRQF_ONESHOT,
+ "qt2160", input);
if (error) {
dev_err(&client->dev,
"failed to allocate irq %d\n", client->irq);
- goto err_free_mem;
+ return error;
+ }
+ } else {
+ error = input_setup_polling(input, qt2160_get_key_matrix);
+ if (error) {
+ dev_err(&client->dev, "Failed to setup polling\n");
+ return error;
}
+ input_set_poll_interval(input, QT2160_CYCLE_INTERVAL);
}
error = qt2160_register_leds(qt2160);
if (error) {
dev_err(&client->dev, "Failed to register leds\n");
- goto err_free_irq;
+ return error;
}
error = input_register_device(qt2160->input);
if (error) {
dev_err(&client->dev,
"Failed to register input device\n");
- goto err_unregister_leds;
+ return error;
}
- i2c_set_clientdata(client, qt2160);
- qt2160_schedule_read(qt2160);
-
return 0;
-
-err_unregister_leds:
- qt2160_unregister_leds(qt2160);
-err_free_irq:
- if (client->irq)
- free_irq(client->irq, qt2160);
-err_free_mem:
- input_free_device(input);
- kfree(qt2160);
- return error;
-}
-
-static void qt2160_remove(struct i2c_client *client)
-{
- struct qt2160_data *qt2160 = i2c_get_clientdata(client);
-
- qt2160_unregister_leds(qt2160);
-
- /* Release IRQ so no queue will be scheduled */
- if (client->irq)
- free_irq(client->irq, qt2160);
-
- cancel_delayed_work_sync(&qt2160->dwork);
-
- input_unregister_device(qt2160->input);
- kfree(qt2160);
}
static const struct i2c_device_id qt2160_idtable[] = {
@@ -461,7 +406,6 @@ static struct i2c_driver qt2160_driver = {
.id_table = qt2160_idtable,
.probe = qt2160_probe,
- .remove = qt2160_remove,
};
module_i2c_driver(qt2160_driver);
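
The qt2160 change replaces the self-rearming delayed work with the input core's built-in polling: when no interrupt line is available, the core calls the scan routine every QT2160_CYCLE_INTERVAL milliseconds, and when an IRQ is present the same routine runs from the threaded handler. A sketch of that split, assuming hypothetical foo_* names:

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>

#define FOO_POLL_INTERVAL_MS	2000

static void foo_scan(struct input_dev *input)
{
	/* ... read the key matrix and report changes ... */
	input_sync(input);
}

static irqreturn_t foo_irq(int irq, void *data)
{
	foo_scan(data);
	return IRQ_HANDLED;
}

static int foo_setup_events(struct i2c_client *client, struct input_dev *input)
{
	int error;

	if (client->irq)
		return devm_request_threaded_irq(&client->dev, client->irq,
						 NULL, foo_irq, IRQF_ONESHOT,
						 "foo-keys", input);

	/* No interrupt line: let the input core poll the scan routine. */
	error = input_setup_polling(input, foo_scan);
	if (error)
		return error;

	input_set_poll_interval(input, FOO_POLL_INTERVAL_MS);
	return 0;
}
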
diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c
index 15c15c0958b0..f304cab0ebdb 100644
--- a/drivers/input/keyboard/sun4i-lradc-keys.c
+++ b/drivers/input/keyboard/sun4i-lradc-keys.c
@@ -21,10 +21,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -307,8 +308,7 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
input_set_drvdata(lradc->input, lradc);
- lradc->base = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ lradc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lradc->base))
return PTR_ERR(lradc->base);
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 2f745cabf4f2..8af59ced1ec2 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -24,6 +24,8 @@
#define TCA6416_INVERT 2
#define TCA6416_DIRECTION 3
+#define TCA6416_POLL_INTERVAL 100 /* msec */
+
static const struct i2c_device_id tca6416_id[] = {
{ "tca6416-keys", 16, },
{ "tca6408-keys", 8, },
@@ -43,7 +45,6 @@ struct tca6416_keypad_chip {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work dwork;
int io_size;
int irqnum;
u16 pinmask;
@@ -85,9 +86,9 @@ static int tca6416_read_reg(struct tca6416_keypad_chip *chip, int reg, u16 *val)
return 0;
}
-static void tca6416_keys_scan(struct tca6416_keypad_chip *chip)
+static void tca6416_keys_scan(struct input_dev *input)
{
- struct input_dev *input = chip->input;
+ struct tca6416_keypad_chip *chip = input_get_drvdata(input);
u16 reg_val, val;
int error, i, pin_index;
@@ -122,33 +123,20 @@ static void tca6416_keys_scan(struct tca6416_keypad_chip *chip)
*/
static irqreturn_t tca6416_keys_isr(int irq, void *dev_id)
{
- struct tca6416_keypad_chip *chip = dev_id;
-
- tca6416_keys_scan(chip);
+ tca6416_keys_scan(dev_id);
return IRQ_HANDLED;
}
-static void tca6416_keys_work_func(struct work_struct *work)
-{
- struct tca6416_keypad_chip *chip =
- container_of(work, struct tca6416_keypad_chip, dwork.work);
-
- tca6416_keys_scan(chip);
- schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
-}
-
static int tca6416_keys_open(struct input_dev *dev)
{
struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
- /* Get initial device state in case it has switches */
- tca6416_keys_scan(chip);
-
- if (chip->use_polling)
- schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
- else
- enable_irq(chip->irqnum);
+ if (!chip->use_polling) {
+ /* Get initial device state in case it has switches */
+ tca6416_keys_scan(dev);
+ enable_irq(chip->client->irq);
+ }
return 0;
}
@@ -157,10 +145,8 @@ static void tca6416_keys_close(struct input_dev *dev)
{
struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
- if (chip->use_polling)
- cancel_delayed_work_sync(&chip->dwork);
- else
- disable_irq(chip->irqnum);
+ if (!chip->use_polling)
+ disable_irq(chip->client->irq);
}
static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
@@ -216,12 +202,15 @@ static int tca6416_keypad_probe(struct i2c_client *client)
return -EINVAL;
}
- chip = kzalloc(struct_size(chip, buttons, pdata->nbuttons), GFP_KERNEL);
- input = input_allocate_device();
- if (!chip || !input) {
- error = -ENOMEM;
- goto fail1;
- }
+ chip = devm_kzalloc(&client->dev,
+ struct_size(chip, buttons, pdata->nbuttons),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
chip->client = client;
chip->input = input;
@@ -229,11 +218,8 @@ static int tca6416_keypad_probe(struct i2c_client *client)
chip->pinmask = pdata->pinmask;
chip->use_polling = pdata->use_polling;
- INIT_DELAYED_WORK(&chip->dwork, tca6416_keys_work_func);
-
input->phys = "tca6416-keys/input0";
input->name = client->name;
- input->dev.parent = &client->dev;
input->open = tca6416_keys_open;
input->close = tca6416_keys_close;
@@ -263,24 +249,28 @@ static int tca6416_keypad_probe(struct i2c_client *client)
*/
error = tca6416_setup_registers(chip);
if (error)
- goto fail1;
+ return error;
- if (!chip->use_polling) {
- if (pdata->irq_is_gpio)
- chip->irqnum = gpio_to_irq(client->irq);
- else
- chip->irqnum = client->irq;
-
- error = request_threaded_irq(chip->irqnum, NULL,
- tca6416_keys_isr,
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- "tca6416-keypad", chip);
+ if (chip->use_polling) {
+ error = input_setup_polling(input, tca6416_keys_scan);
+ if (error) {
+ dev_err(&client->dev, "Failed to setup polling\n");
+ return error;
+ }
+
+ input_set_poll_interval(input, TCA6416_POLL_INTERVAL);
+ } else {
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, tca6416_keys_isr,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
+ "tca6416-keypad", input);
if (error) {
dev_dbg(&client->dev,
"Unable to claim irq %d; error %d\n",
- chip->irqnum, error);
- goto fail1;
+ client->irq, error);
+ return error;
}
}
@@ -288,70 +278,19 @@ static int tca6416_keypad_probe(struct i2c_client *client)
if (error) {
dev_dbg(&client->dev,
"Unable to register input device, error: %d\n", error);
- goto fail2;
+ return error;
}
i2c_set_clientdata(client, chip);
- device_init_wakeup(&client->dev, 1);
return 0;
-
-fail2:
- if (!chip->use_polling) {
- free_irq(chip->irqnum, chip);
- enable_irq(chip->irqnum);
- }
-fail1:
- input_free_device(input);
- kfree(chip);
- return error;
}
-static void tca6416_keypad_remove(struct i2c_client *client)
-{
- struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
-
- if (!chip->use_polling) {
- free_irq(chip->irqnum, chip);
- enable_irq(chip->irqnum);
- }
-
- input_unregister_device(chip->input);
- kfree(chip);
-}
-
-static int tca6416_keypad_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(chip->irqnum);
-
- return 0;
-}
-
-static int tca6416_keypad_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(chip->irqnum);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(tca6416_keypad_dev_pm_ops,
- tca6416_keypad_suspend, tca6416_keypad_resume);
-
static struct i2c_driver tca6416_keypad_driver = {
.driver = {
.name = "tca6416-keypad",
- .pm = pm_sleep_ptr(&tca6416_keypad_dev_pm_ops),
},
.probe = tca6416_keypad_probe,
- .remove = tca6416_keypad_remove,
.id_table = tca6416_id,
};
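
tca6416-keypad keeps its open/close gating but drops the manual request-then-disable dance: IRQF_NO_AUTOEN leaves the line masked from request time until open() enables it, and polling mode goes through input_setup_polling() as above. A hedged sketch of the open/close half (hypothetical names):

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>

struct foo_chip {
	struct i2c_client *client;
	bool use_polling;
};

static void foo_scan(struct input_dev *input)
{
	/* ... read the expander and report keys ... */
}

static irqreturn_t foo_isr(int irq, void *data)
{
	foo_scan(data);
	return IRQ_HANDLED;
}

static int foo_open(struct input_dev *input)
{
	struct foo_chip *chip = input_get_drvdata(input);

	if (!chip->use_polling) {
		/* Catch the initial state before unmasking the interrupt. */
		foo_scan(input);
		enable_irq(chip->client->irq);
	}

	return 0;
}

static void foo_close(struct input_dev *input)
{
	struct foo_chip *chip = input_get_drvdata(input);

	if (!chip->use_polling)
		disable_irq(chip->client->irq);
}

static int foo_request_irq(struct foo_chip *chip, struct input_dev *input)
{
	/* IRQF_NO_AUTOEN leaves the line masked until foo_open() enables it. */
	return devm_request_threaded_irq(&chip->client->dev, chip->client->irq,
					 NULL, foo_isr,
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
					 IRQF_NO_AUTOEN,
					 "foo-keypad", input);
}
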
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index d5a6c7d8eb25..c9a823ea45d0 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -640,7 +640,7 @@ static int tegra_kbc_probe(struct platform_device *pdev)
timer_setup(&kbc->timer, tegra_kbc_keypress_timer, 0);
- kbc->mmio = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ kbc->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kbc->mmio))
return PTR_ERR(kbc->mmio);
diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c
index 75bd3ea51194..0fd761ae052f 100644
--- a/drivers/input/keyboard/tm2-touchkey.c
+++ b/drivers/input/keyboard/tm2-touchkey.c
@@ -19,7 +19,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 8a320e6218e3..6ba984d7f0b1 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -791,10 +791,10 @@ config INPUT_IQS626A
module will be called iqs626a.
config INPUT_IQS7222
- tristate "Azoteq IQS7222A/B/C capacitive touch controller"
+ tristate "Azoteq IQS7222A/B/C/D capacitive touch controller"
depends on I2C
help
- Say Y to enable support for the Azoteq IQS7222A/B/C family
+ Say Y to enable support for the Azoteq IQS7222A/B/C/D family
of capacitive touch controllers.
To compile this driver as a module, choose M here: the
diff --git a/drivers/input/misc/cpcap-pwrbutton.c b/drivers/input/misc/cpcap-pwrbutton.c
index 879790bbf9fe..85cddb84717a 100644
--- a/drivers/input/misc/cpcap-pwrbutton.c
+++ b/drivers/input/misc/cpcap-pwrbutton.c
@@ -1,16 +1,8 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* CPCAP Power Button Input Driver
*
* Copyright (C) 2017 Sebastian Reichel <sre@kernel.org>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index b14a389600c9..74808bae326a 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -10,6 +10,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/workqueue.h>
#include <linux/regmap.h>
#include <linux/of.h>
@@ -251,6 +252,14 @@ static int da9063_onkey_probe(struct platform_device *pdev)
return error;
}
+ error = dev_pm_set_wake_irq(&pdev->dev, irq);
+ if (error)
+ dev_warn(&pdev->dev,
+ "Failed to set IRQ %d as a wake IRQ: %d\n",
+ irq, error);
+ else
+ device_init_wakeup(&pdev->dev, true);
+
error = input_register_device(onkey->input);
if (error) {
dev_err(&pdev->dev,
diff --git a/drivers/input/misc/gpio-vibra.c b/drivers/input/misc/gpio-vibra.c
index 134a1309ba92..ad44b4d18a2a 100644
--- a/drivers/input/misc/gpio-vibra.c
+++ b/drivers/input/misc/gpio-vibra.c
@@ -18,7 +18,7 @@
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
@@ -113,22 +113,14 @@ static int gpio_vibrator_probe(struct platform_device *pdev)
return -ENOMEM;
vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
- err = PTR_ERR_OR_ZERO(vibrator->vcc);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request regulator: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->vcc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->vcc),
+ "Failed to request regulator\n");
vibrator->gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
- err = PTR_ERR_OR_ZERO(vibrator->gpio);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request main gpio: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->gpio),
+ "Failed to request main gpio\n");
INIT_WORK(&vibrator->play_work, gpio_vibrator_play_work);
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
index 1272ef7b5794..c0a085639870 100644
--- a/drivers/input/misc/iqs269a.c
+++ b/drivers/input/misc/iqs269a.c
@@ -17,9 +17,9 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/iqs626a.c b/drivers/input/misc/iqs626a.c
index 50035c25c3f7..0dab54d3a060 100644
--- a/drivers/input/misc/iqs626a.c
+++ b/drivers/input/misc/iqs626a.c
@@ -19,8 +19,8 @@
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 096b0925f41b..36aeeae77611 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Azoteq IQS7222A/B/C Capacitive Touch Controller
+ * Azoteq IQS7222A/B/C/D Capacitive Touch Controller
*
* Copyright (C) 2022 Jeff LaBundy <jeff@labundy.com>
*/
@@ -12,11 +12,12 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -25,6 +26,7 @@
#define IQS7222_PROD_NUM_A 840
#define IQS7222_PROD_NUM_B 698
#define IQS7222_PROD_NUM_C 863
+#define IQS7222_PROD_NUM_D 1046
#define IQS7222_SYS_STATUS 0x10
#define IQS7222_SYS_STATUS_RESET BIT(3)
@@ -54,6 +56,7 @@
#define IQS7222_EVENT_MASK_ATI BIT(12)
#define IQS7222_EVENT_MASK_SLDR BIT(10)
+#define IQS7222_EVENT_MASK_TPAD IQS7222_EVENT_MASK_SLDR
#define IQS7222_EVENT_MASK_TOUCH BIT(1)
#define IQS7222_EVENT_MASK_PROX BIT(0)
@@ -71,6 +74,7 @@
#define IQS7222_MAX_COLS_CHAN 6
#define IQS7222_MAX_COLS_FILT 2
#define IQS7222_MAX_COLS_SLDR 11
+#define IQS7222_MAX_COLS_TPAD 24
#define IQS7222_MAX_COLS_GPIO 3
#define IQS7222_MAX_COLS_SYS 13
@@ -102,16 +106,18 @@ enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
IQS7222_REG_GRP_SLDR,
+ IQS7222_REG_GRP_TPAD,
IQS7222_REG_GRP_GPIO,
IQS7222_REG_GRP_SYS,
IQS7222_NUM_REG_GRPS
};
static const char * const iqs7222_reg_grp_names[IQS7222_NUM_REG_GRPS] = {
- [IQS7222_REG_GRP_CYCLE] = "cycle",
- [IQS7222_REG_GRP_CHAN] = "channel",
- [IQS7222_REG_GRP_SLDR] = "slider",
- [IQS7222_REG_GRP_GPIO] = "gpio",
+ [IQS7222_REG_GRP_CYCLE] = "cycle-%d",
+ [IQS7222_REG_GRP_CHAN] = "channel-%d",
+ [IQS7222_REG_GRP_SLDR] = "slider-%d",
+ [IQS7222_REG_GRP_TPAD] = "trackpad",
+ [IQS7222_REG_GRP_GPIO] = "gpio-%d",
};
static const unsigned int iqs7222_max_cols[IQS7222_NUM_REG_GRPS] = {
@@ -122,6 +128,7 @@ static const unsigned int iqs7222_max_cols[IQS7222_NUM_REG_GRPS] = {
[IQS7222_REG_GRP_CHAN] = IQS7222_MAX_COLS_CHAN,
[IQS7222_REG_GRP_FILT] = IQS7222_MAX_COLS_FILT,
[IQS7222_REG_GRP_SLDR] = IQS7222_MAX_COLS_SLDR,
+ [IQS7222_REG_GRP_TPAD] = IQS7222_MAX_COLS_TPAD,
[IQS7222_REG_GRP_GPIO] = IQS7222_MAX_COLS_GPIO,
[IQS7222_REG_GRP_SYS] = IQS7222_MAX_COLS_SYS,
};
@@ -130,8 +137,10 @@ static const unsigned int iqs7222_gpio_links[] = { 2, 5, 6, };
struct iqs7222_event_desc {
const char *name;
+ u16 link;
u16 mask;
u16 val;
+ u16 strict;
u16 enable;
enum iqs7222_reg_key_id reg_key;
};
@@ -188,6 +197,93 @@ static const struct iqs7222_event_desc iqs7222_sl_events[] = {
},
};
+static const struct iqs7222_event_desc iqs7222_tp_events[] = {
+ {
+ .name = "event-press",
+ .link = BIT(7),
+ },
+ {
+ .name = "event-tap",
+ .link = BIT(0),
+ .mask = BIT(0),
+ .val = BIT(0),
+ .enable = BIT(0),
+ .reg_key = IQS7222_REG_KEY_TAP,
+ },
+ {
+ .name = "event-swipe-x-pos",
+ .link = BIT(2),
+ .mask = BIT(2) | BIT(1),
+ .val = BIT(2),
+ .strict = BIT(4),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-swipe-y-pos",
+ .link = BIT(3),
+ .mask = BIT(3) | BIT(1),
+ .val = BIT(3),
+ .strict = BIT(3),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-swipe-x-neg",
+ .link = BIT(4),
+ .mask = BIT(4) | BIT(1),
+ .val = BIT(4),
+ .strict = BIT(4),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-swipe-y-neg",
+ .link = BIT(5),
+ .mask = BIT(5) | BIT(1),
+ .val = BIT(5),
+ .strict = BIT(3),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-x-pos",
+ .link = BIT(2),
+ .mask = BIT(2) | BIT(1),
+ .val = BIT(2) | BIT(1),
+ .strict = BIT(4),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-y-pos",
+ .link = BIT(3),
+ .mask = BIT(3) | BIT(1),
+ .val = BIT(3) | BIT(1),
+ .strict = BIT(3),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-x-neg",
+ .link = BIT(4),
+ .mask = BIT(4) | BIT(1),
+ .val = BIT(4) | BIT(1),
+ .strict = BIT(4),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-y-neg",
+ .link = BIT(5),
+ .mask = BIT(5) | BIT(1),
+ .val = BIT(5) | BIT(1),
+ .strict = BIT(3),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+};
+
struct iqs7222_reg_grp_desc {
u16 base;
int num_row;
@@ -524,6 +620,62 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
},
},
+ {
+ .prod_num = IQS7222_PROD_NUM_D,
+ .fw_major = 0,
+ .fw_minor = 37,
+ .touch_link = 1770,
+ .allow_offset = 9,
+ .event_offset = 10,
+ .comms_offset = 11,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 7,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 7,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8700,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 14,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xA000,
+ .num_row = 14,
+ .num_col = 4,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xAE00,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_TPAD] = {
+ .base = 0xB000,
+ .num_row = 1,
+ .num_col = 24,
+ },
+ [IQS7222_REG_GRP_GPIO] = {
+ .base = 0xC000,
+ .num_row = 3,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 12,
+ },
+ },
+ },
};
struct iqs7222_prop_desc {
@@ -1009,6 +1161,123 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
.label = "maximum gesture time",
},
{
+ .name = "azoteq,num-rows",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 0,
+ .reg_shift = 4,
+ .reg_width = 4,
+ .val_min = 1,
+ .val_max = 12,
+ .label = "number of rows",
+ },
+ {
+ .name = "azoteq,num-cols",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 0,
+ .reg_shift = 0,
+ .reg_width = 4,
+ .val_min = 1,
+ .val_max = 12,
+ .label = "number of columns",
+ },
+ {
+ .name = "azoteq,lower-cal-y",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 1,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "lower vertical calibration",
+ },
+ {
+ .name = "azoteq,lower-cal-x",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 1,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "lower horizontal calibration",
+ },
+ {
+ .name = "azoteq,upper-cal-y",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 2,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "upper vertical calibration",
+ },
+ {
+ .name = "azoteq,upper-cal-x",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 2,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "upper horizontal calibration",
+ },
+ {
+ .name = "azoteq,top-speed",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 3,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 4,
+ .label = "top speed",
+ },
+ {
+ .name = "azoteq,bottom-speed",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 3,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "bottom speed",
+ },
+ {
+ .name = "azoteq,gesture-min-ms",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_TAP,
+ .reg_offset = 20,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 16,
+ .label = "minimum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ .reg_offset = 21,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 16,
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_TAP,
+ .reg_offset = 21,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .val_pitch = 16,
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_TAP,
+ .reg_offset = 22,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ .reg_offset = 23,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .label = "gesture distance",
+ },
+ {
.name = "drive-open-drain",
.reg_grp = IQS7222_REG_GRP_GPIO,
.reg_offset = 0,
@@ -1091,16 +1360,19 @@ struct iqs7222_private {
struct gpio_desc *irq_gpio;
struct i2c_client *client;
struct input_dev *keypad;
+ struct touchscreen_properties prop;
unsigned int kp_type[IQS7222_MAX_CHAN][ARRAY_SIZE(iqs7222_kp_events)];
unsigned int kp_code[IQS7222_MAX_CHAN][ARRAY_SIZE(iqs7222_kp_events)];
unsigned int sl_code[IQS7222_MAX_SLDR][ARRAY_SIZE(iqs7222_sl_events)];
unsigned int sl_axis[IQS7222_MAX_SLDR];
+ unsigned int tp_code[ARRAY_SIZE(iqs7222_tp_events)];
u16 cycle_setup[IQS7222_MAX_CHAN / 2][IQS7222_MAX_COLS_CYCLE];
u16 glbl_setup[IQS7222_MAX_COLS_GLBL];
u16 btn_setup[IQS7222_MAX_CHAN][IQS7222_MAX_COLS_BTN];
u16 chan_setup[IQS7222_MAX_CHAN][IQS7222_MAX_COLS_CHAN];
u16 filt_setup[IQS7222_MAX_COLS_FILT];
u16 sldr_setup[IQS7222_MAX_SLDR][IQS7222_MAX_COLS_SLDR];
+ u16 tpad_setup[IQS7222_MAX_COLS_TPAD];
u16 gpio_setup[ARRAY_SIZE(iqs7222_gpio_links)][IQS7222_MAX_COLS_GPIO];
u16 sys_setup[IQS7222_MAX_COLS_SYS];
};
@@ -1127,6 +1399,9 @@ static u16 *iqs7222_setup(struct iqs7222_private *iqs7222,
case IQS7222_REG_GRP_SLDR:
return iqs7222->sldr_setup[row];
+ case IQS7222_REG_GRP_TPAD:
+ return iqs7222->tpad_setup;
+
case IQS7222_REG_GRP_GPIO:
return iqs7222->gpio_setup[row];
@@ -1381,9 +1656,6 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
if (error)
return error;
- sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
- sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
-
for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
/*
* Trigger ATI from streaming and normal-power modes so that
@@ -1561,8 +1833,11 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
return error;
}
- if (dir == READ)
+ if (dir == READ) {
+ iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+ iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
return 0;
+ }
return iqs7222_ati_trigger(iqs7222);
}
@@ -1936,6 +2211,14 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
ref_setup[4] = dev_desc->touch_link;
if (fwnode_property_present(chan_node, "azoteq,use-prox"))
ref_setup[4] -= 2;
+ } else if (dev_desc->reg_grps[IQS7222_REG_GRP_TPAD].num_row &&
+ fwnode_property_present(chan_node,
+ "azoteq,counts-filt-enable")) {
+ /*
+ * In the case of IQS7222D, however, the reference mode field
+ * is partially repurposed as a counts filter enable control.
+ */
+ chan_setup[0] |= IQS7222_CHAN_SETUP_0_REF_MODE_REF;
}
if (fwnode_property_present(chan_node, "azoteq,rx-enable")) {
@@ -2278,6 +2561,136 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222,
IQS7222_REG_KEY_NO_WHEEL);
}
+static int iqs7222_parse_tpad(struct iqs7222_private *iqs7222,
+ struct fwnode_handle *tpad_node, int tpad_index)
+{
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ struct touchscreen_properties *prop = &iqs7222->prop;
+ struct i2c_client *client = iqs7222->client;
+ int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+ int count, error, i;
+ u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset];
+ u16 *tpad_setup = iqs7222->tpad_setup;
+ unsigned int chan_sel[12];
+
+ error = iqs7222_parse_props(iqs7222, tpad_node, tpad_index,
+ IQS7222_REG_GRP_TPAD,
+ IQS7222_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ count = fwnode_property_count_u32(tpad_node, "azoteq,channel-select");
+ if (count < 0) {
+ dev_err(&client->dev, "Failed to count %s channels: %d\n",
+ fwnode_get_name(tpad_node), count);
+ return count;
+ } else if (!count || count > ARRAY_SIZE(chan_sel)) {
+ dev_err(&client->dev, "Invalid number of %s channels\n",
+ fwnode_get_name(tpad_node));
+ return -EINVAL;
+ }
+
+ error = fwnode_property_read_u32_array(tpad_node,
+ "azoteq,channel-select",
+ chan_sel, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read %s channels: %d\n",
+ fwnode_get_name(tpad_node), error);
+ return error;
+ }
+
+ tpad_setup[6] &= ~GENMASK(num_chan - 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(chan_sel); i++) {
+ tpad_setup[8 + i] = 0;
+ if (i >= count || chan_sel[i] == U8_MAX)
+ continue;
+
+ if (chan_sel[i] >= num_chan) {
+ dev_err(&client->dev, "Invalid %s channel: %u\n",
+ fwnode_get_name(tpad_node), chan_sel[i]);
+ return -EINVAL;
+ }
+
+ /*
+ * The following fields indicate which channels participate in
+ * the trackpad, as well as each channel's relative placement.
+ */
+ tpad_setup[6] |= BIT(chan_sel[i]);
+ tpad_setup[8 + i] = chan_sel[i] * 34 + 1072;
+ }
+
+ tpad_setup[7] = dev_desc->touch_link;
+ if (fwnode_property_present(tpad_node, "azoteq,use-prox"))
+ tpad_setup[7] -= 2;
+
+ for (i = 0; i < ARRAY_SIZE(iqs7222_tp_events); i++)
+ tpad_setup[20] &= ~(iqs7222_tp_events[i].strict |
+ iqs7222_tp_events[i].enable);
+
+ for (i = 0; i < ARRAY_SIZE(iqs7222_tp_events); i++) {
+ const char *event_name = iqs7222_tp_events[i].name;
+ struct fwnode_handle *event_node;
+
+ event_node = fwnode_get_named_child_node(tpad_node, event_name);
+ if (!event_node)
+ continue;
+
+ if (fwnode_property_present(event_node,
+ "azoteq,gesture-angle-tighten"))
+ tpad_setup[20] |= iqs7222_tp_events[i].strict;
+
+ tpad_setup[20] |= iqs7222_tp_events[i].enable;
+
+ error = iqs7222_parse_event(iqs7222, event_node, tpad_index,
+ IQS7222_REG_GRP_TPAD,
+ iqs7222_tp_events[i].reg_key,
+ iqs7222_tp_events[i].link, 1566,
+ NULL,
+ &iqs7222->tp_code[i]);
+ fwnode_handle_put(event_node);
+ if (error)
+ return error;
+
+ if (!dev_desc->event_offset)
+ continue;
+
+ /*
+ * The press/release event is determined based on whether the
+ * coordinate fields report 0xFFFF and solely relies on touch
+ * or proximity interrupts to be unmasked.
+ */
+ if (i)
+ *event_mask |= IQS7222_EVENT_MASK_TPAD;
+ else if (tpad_setup[7] == dev_desc->touch_link)
+ *event_mask |= IQS7222_EVENT_MASK_TOUCH;
+ else
+ *event_mask |= IQS7222_EVENT_MASK_PROX;
+ }
+
+ if (!iqs7222->tp_code[0])
+ return 0;
+
+ input_set_abs_params(iqs7222->keypad, ABS_X,
+ 0, (tpad_setup[4] ? : 1) - 1, 0, 0);
+
+ input_set_abs_params(iqs7222->keypad, ABS_Y,
+ 0, (tpad_setup[5] ? : 1) - 1, 0, 0);
+
+ touchscreen_parse_properties(iqs7222->keypad, false, prop);
+
+ if (prop->max_x >= U16_MAX || prop->max_y >= U16_MAX) {
+ dev_err(&client->dev, "Invalid trackpad size: %u*%u\n",
+ prop->max_x, prop->max_y);
+ return -EINVAL;
+ }
+
+ tpad_setup[4] = prop->max_x + 1;
+ tpad_setup[5] = prop->max_y + 1;
+
+ return 0;
+}
+
static int (*iqs7222_parse_extra[IQS7222_NUM_REG_GRPS])
(struct iqs7222_private *iqs7222,
struct fwnode_handle *reg_grp_node,
@@ -2285,6 +2698,7 @@ static int (*iqs7222_parse_extra[IQS7222_NUM_REG_GRPS])
[IQS7222_REG_GRP_CYCLE] = iqs7222_parse_cycle,
[IQS7222_REG_GRP_CHAN] = iqs7222_parse_chan,
[IQS7222_REG_GRP_SLDR] = iqs7222_parse_sldr,
+ [IQS7222_REG_GRP_TPAD] = iqs7222_parse_tpad,
};
static int iqs7222_parse_reg_grp(struct iqs7222_private *iqs7222,
@@ -2298,7 +2712,7 @@ static int iqs7222_parse_reg_grp(struct iqs7222_private *iqs7222,
if (iqs7222_reg_grp_names[reg_grp]) {
char reg_grp_name[16];
- snprintf(reg_grp_name, sizeof(reg_grp_name), "%s-%d",
+ snprintf(reg_grp_name, sizeof(reg_grp_name),
iqs7222_reg_grp_names[reg_grp], reg_grp_index);
reg_grp_node = device_get_named_child_node(&client->dev,
@@ -2346,8 +2760,8 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
continue;
/*
- * The IQS7222C exposes multiple GPIO and must be informed
- * as to which GPIO this group represents.
+ * The IQS7222C and IQS7222D expose multiple GPIO and must be
+ * informed as to which GPIO this group represents.
*/
for (j = 0; j < ARRAY_SIZE(iqs7222_gpio_links); j++)
gpio_setup[0] &= ~BIT(iqs7222_gpio_links[j]);
@@ -2480,6 +2894,41 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
iqs7222->sl_code[i][j], 0);
}
+ for (i = 0; i < dev_desc->reg_grps[IQS7222_REG_GRP_TPAD].num_row; i++) {
+ u16 tpad_pos_x = le16_to_cpu(status[4]);
+ u16 tpad_pos_y = le16_to_cpu(status[5]);
+ u16 state = le16_to_cpu(status[6]);
+
+ input_report_key(iqs7222->keypad, iqs7222->tp_code[0],
+ tpad_pos_x < U16_MAX);
+
+ if (tpad_pos_x < U16_MAX)
+ touchscreen_report_pos(iqs7222->keypad, &iqs7222->prop,
+ tpad_pos_x, tpad_pos_y, false);
+
+ if (!(le16_to_cpu(status[1]) & IQS7222_EVENT_MASK_TPAD))
+ continue;
+
+ /*
+ * Skip the press/release event, as it does not have separate
+ * status fields and is handled separately.
+ */
+ for (j = 1; j < ARRAY_SIZE(iqs7222_tp_events); j++) {
+ u16 mask = iqs7222_tp_events[j].mask;
+ u16 val = iqs7222_tp_events[j].val;
+
+ input_report_key(iqs7222->keypad,
+ iqs7222->tp_code[j],
+ (state & mask) == val);
+ }
+
+ input_sync(iqs7222->keypad);
+
+ for (j = 1; j < ARRAY_SIZE(iqs7222_tp_events); j++)
+ input_report_key(iqs7222->keypad,
+ iqs7222->tp_code[j], 0);
+ }
+
input_sync(iqs7222->keypad);
return 0;
@@ -2584,6 +3033,7 @@ static const struct of_device_id iqs7222_of_match[] = {
{ .compatible = "azoteq,iqs7222a" },
{ .compatible = "azoteq,iqs7222b" },
{ .compatible = "azoteq,iqs7222c" },
+ { .compatible = "azoteq,iqs7222d" },
{ }
};
MODULE_DEVICE_TABLE(of, iqs7222_of_match);
@@ -2598,5 +3048,5 @@ static struct i2c_driver iqs7222_i2c_driver = {
module_i2c_driver(iqs7222_i2c_driver);
MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
-MODULE_DESCRIPTION("Azoteq IQS7222A/B/C Capacitive Touch Controller");
+MODULE_DESCRIPTION("Azoteq IQS7222A/B/C/D Capacitive Touch Controller");
MODULE_LICENSE("GPL");
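
For the new IQS7222D trackpad group, the driver leans on the generic touchscreen helpers so the usual firmware properties (touchscreen-size-x/y, inversion, axis swap) apply to the reported coordinates. A rough sketch of that pairing, with hypothetical names and a plain BTN_TOUCH report standing in for the driver's configurable press keycode:

#include <linux/input.h>
#include <linux/input/touchscreen.h>
#include <linux/types.h>

/* Set up the axis ranges once, then let the helpers remap raw points. */
static void foo_init_axes(struct input_dev *input,
			  struct touchscreen_properties *prop,
			  u16 max_x, u16 max_y)
{
	input_set_abs_params(input, ABS_X, 0, max_x, 0, 0);
	input_set_abs_params(input, ABS_Y, 0, max_y, 0, 0);

	/* Applies touchscreen size/invert/swap properties from firmware. */
	touchscreen_parse_properties(input, false, prop);
}

static void foo_report_point(struct input_dev *input,
			     struct touchscreen_properties *prop,
			     u16 x, u16 y, bool touching)
{
	input_report_key(input, BTN_TOUCH, touching);
	if (touching)
		touchscreen_report_pos(input, prop, x, y, false);
	input_sync(input);
}
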
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 76a190b2220b..662b436d765b 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -11,7 +11,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/input.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#define MMA8450_DRV_NAME "mma8450"
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 74d77d8aaeff..ba747c5b2b5f 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index 04cb87efd799..5c288fe7accf 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 89fb137e3715..c406a1cca5c4 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -12,7 +12,6 @@
#include <linux/regmap.h>
#include <linux/log2.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#define PON_CNTL_1 0x1C
#define PON_CNTL_PULL_UP BIT(7)
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 3cf1812384e6..1e731d8397c6 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -132,13 +132,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
return -ENOMEM;
beeper->pwm = devm_pwm_get(dev, NULL);
- if (IS_ERR(beeper->pwm)) {
- error = PTR_ERR(beeper->pwm);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to request PWM device: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(beeper->pwm))
+ return dev_err_probe(dev, PTR_ERR(beeper->pwm), "Failed to request PWM device\n");
/* Sync up PWM state and ensure it is off. */
pwm_init_state(beeper->pwm, &state);
@@ -151,13 +146,9 @@ static int pwm_beeper_probe(struct platform_device *pdev)
}
beeper->amplifier = devm_regulator_get(dev, "amp");
- if (IS_ERR(beeper->amplifier)) {
- error = PTR_ERR(beeper->amplifier);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get 'amp' regulator: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(beeper->amplifier))
+ return dev_err_probe(dev, PTR_ERR(beeper->amplifier),
+ "Failed to get 'amp' regulator\n");
INIT_WORK(&beeper->work, pwm_beeper_work);
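
The dev_err_probe() conversions in this series (pwm-beeper here, plus gpio-vibra, pwm-vibra, rotary-encoder, elan_i2c and bu21013 elsewhere) all follow one shape: it logs only when the error is not -EPROBE_DEFER, records the deferral reason for devices_deferred, and returns the error so the call can sit directly in a return statement. A minimal sketch with hypothetical names:

#include <linux/device.h>
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>

static int foo_get_resources(struct device *dev)
{
	struct regulator *vcc;
	struct pwm_device *pwm;

	vcc = devm_regulator_get(dev, "vcc");
	if (IS_ERR(vcc))
		return dev_err_probe(dev, PTR_ERR(vcc),
				     "Failed to request regulator\n");

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return dev_err_probe(dev, PTR_ERR(pwm),
				     "Failed to request PWM device\n");

	return 0;
}
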
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index 2ba035299db8..acac79c488aa 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -15,7 +15,7 @@
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/pwm.h>
@@ -140,32 +140,20 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
return -ENOMEM;
vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
- err = PTR_ERR_OR_ZERO(vibrator->vcc);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request regulator: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->vcc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->vcc),
+ "Failed to request regulator\n");
vibrator->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
GPIOD_OUT_LOW);
- err = PTR_ERR_OR_ZERO(vibrator->enable_gpio);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request enable gpio: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->enable_gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->enable_gpio),
+ "Failed to request enable gpio\n");
vibrator->pwm = devm_pwm_get(&pdev->dev, "enable");
- err = PTR_ERR_OR_ZERO(vibrator->pwm);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request main pwm: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->pwm))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->pwm),
+ "Failed to request main pwm\n");
INIT_WORK(&vibrator->play_work, pwm_vibrator_play_work);
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 22ec62083065..e94cab8133be 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -236,12 +236,8 @@ static int rotary_encoder_probe(struct platform_device *pdev)
device_property_read_bool(dev, "rotary-encoder,relative-axis");
encoder->gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
- if (IS_ERR(encoder->gpios)) {
- err = PTR_ERR(encoder->gpios);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "unable to get gpios: %d\n", err);
- return err;
- }
+ if (IS_ERR(encoder->gpios))
+ return dev_err_probe(dev, PTR_ERR(encoder->gpios), "unable to get gpios\n");
if (encoder->gpios->ndescs < 2) {
dev_err(dev, "not enough gpios found\n");
return -EINVAL;
@@ -255,7 +251,6 @@ static int rotary_encoder_probe(struct platform_device *pdev)
input->name = pdev->name;
input->id.bustype = BUS_HOST;
- input->dev.parent = dev;
if (encoder->relative_axis)
input_set_capability(input, EV_REL, encoder->axis);
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index cdcb7737c46a..e5dd84725c6e 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -9,7 +9,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/io.h>
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 0cff742302a9..148a601396f9 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1221,13 +1221,8 @@ static int elan_probe(struct i2c_client *client)
mutex_init(&data->sysfs_mutex);
data->vcc = devm_regulator_get(dev, "vcc");
- if (IS_ERR(data->vcc)) {
- error = PTR_ERR(data->vcc);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get 'vcc' regulator: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(data->vcc))
+ return dev_err_probe(dev, PTR_ERR(data->vcc), "Failed to get 'vcc' regulator\n");
error = regulator_enable(data->vcc);
if (error) {
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index 2a2459b1b4f2..7b13de979908 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libps2.h>
@@ -118,13 +119,18 @@ static psmouse_ret_t psmouse_smbus_process_byte(struct psmouse *psmouse)
return PSMOUSE_FULL_PACKET;
}
-static int psmouse_smbus_reconnect(struct psmouse *psmouse)
+static void psmouse_activate_smbus_mode(struct psmouse_smbus_dev *smbdev)
{
- struct psmouse_smbus_dev *smbdev = psmouse->private;
-
- if (smbdev->need_deactivate)
- psmouse_deactivate(psmouse);
+ if (smbdev->need_deactivate) {
+ psmouse_deactivate(smbdev->psmouse);
+ /* Give the device time to switch into SMBus mode */
+ msleep(30);
+ }
+}
+static int psmouse_smbus_reconnect(struct psmouse *psmouse)
+{
+ psmouse_activate_smbus_mode(psmouse->private);
return 0;
}
@@ -257,8 +263,7 @@ int psmouse_smbus_init(struct psmouse *psmouse,
}
}
- if (need_deactivate)
- psmouse_deactivate(psmouse);
+ psmouse_activate_smbus_mode(smbdev);
psmouse->private = smbdev;
psmouse->protocol_handler = psmouse_smbus_process_byte;
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index 513d96e40e0e..3f6866d39b86 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -14,11 +14,11 @@
* Contributors: Daniel Hellstrom <daniel@gaisler.com>
*/
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/device.h>
#include <linux/delay.h>
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 028e45bd050b..1724d6cb8649 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1281,6 +1281,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ /* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PD5x_7xPNP_PNR_PNN_PNT"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index c712c1fe0605..b68793bf05c8 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -2,7 +2,9 @@
#ifndef _I8042_SPARCIO_H
#define _I8042_SPARCIO_H
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <asm/io.h>
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index ce420eb1f51b..e8a9709f32eb 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -101,12 +101,12 @@ static int rpckbd_probe(struct platform_device *dev)
int tx_irq, rx_irq;
rx_irq = platform_get_irq(dev, 0);
- if (rx_irq <= 0)
- return rx_irq < 0 ? rx_irq : -ENXIO;
+ if (rx_irq < 0)
+ return rx_irq;
tx_irq = platform_get_irq(dev, 1);
- if (tx_irq <= 0)
- return tx_irq < 0 ? tx_irq : -ENXIO;
+ if (tx_irq < 0)
+ return tx_irq;
serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
rpckbd = kzalloc(sizeof(*rpckbd), GFP_KERNEL);
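
The rpckbd change relies on platform_get_irq() returning a negative errno on failure and never 0, so the old "<= 0 ? ... : -ENXIO" translation is redundant. A short sketch (hypothetical helper):

#include <linux/platform_device.h>

static int foo_get_irqs(struct platform_device *pdev, int *rx_irq, int *tx_irq)
{
	/* platform_get_irq() returns a negative errno on failure, never 0. */
	*rx_irq = platform_get_irq(pdev, 0);
	if (*rx_irq < 0)
		return *rx_irq;

	*tx_irq = platform_get_irq(pdev, 1);
	if (*tx_irq < 0)
		return *tx_irq;

	return 0;
}
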
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 960d7601fbc8..f3d28da70b75 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -14,10 +14,10 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#define DRIVER_NAME "xilinx_ps2"
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index c2cbd332af1d..e3e2324547b9 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -655,10 +655,10 @@ config TOUCHSCREEN_MTOUCH
module will be called mtouch.
config TOUCHSCREEN_NOVATEK_NVT_TS
- tristate "Novatek NVT-ts touchscreen support"
+ tristate "Novatek NT11205 touchscreen support"
depends on I2C
help
- Say Y here if you have a Novatek NVT-ts touchscreen.
+ Say Y here if you have a Novatek NT11205 touchscreen.
If unsure, say N.
To compile this driver as a module, choose M here: the
@@ -1365,6 +1365,16 @@ config TOUCHSCREEN_IQS5XX
To compile this driver as a module, choose M here: the
module will be called iqs5xx.
+config TOUCHSCREEN_IQS7211
+ tristate "Azoteq IQS7210A/7211A/E trackpad/touchscreen controller"
+ depends on I2C
+ help
+ Say Y to enable support for the Azoteq IQS7210A/7211A/E
+ family of trackpad/touchscreen controllers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iqs7211.
+
config TOUCHSCREEN_ZINITIX
tristate "Zinitix touchscreen support"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 159cd5136fdb..62bd24f3ac8e 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -115,5 +115,6 @@ obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o
obj-$(CONFIG_TOUCHSCREEN_RASPBERRYPI_FW) += raspberrypi-ts.o
obj-$(CONFIG_TOUCHSCREEN_IQS5XX) += iqs5xx.o
+obj-$(CONFIG_TOUCHSCREEN_IQS7211) += iqs7211.o
obj-$(CONFIG_TOUCHSCREEN_ZINITIX) += zinitix.o
obj-$(CONFIG_TOUCHSCREEN_HIMAX_HX83112B) += himax_hx83112b.o
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 85332cfaa29d..652439a79e21 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -410,31 +410,32 @@ static int bu21013_probe(struct i2c_client *client)
struct input_dev *in_dev;
struct input_absinfo *info;
u32 max_x = 0, max_y = 0;
+ struct device *dev = &client->dev;
int error;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev, "i2c smbus byte data not supported\n");
+ dev_err(dev, "i2c smbus byte data not supported\n");
return -EIO;
}
if (!client->irq) {
- dev_err(&client->dev, "No IRQ set up\n");
+ dev_err(dev, "No IRQ set up\n");
return -EINVAL;
}
- ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
if (!ts)
return -ENOMEM;
ts->client = client;
- ts->x_flip = device_property_read_bool(&client->dev, "rohm,flip-x");
- ts->y_flip = device_property_read_bool(&client->dev, "rohm,flip-y");
+ ts->x_flip = device_property_read_bool(dev, "rohm,flip-x");
+ ts->y_flip = device_property_read_bool(dev, "rohm,flip-y");
- in_dev = devm_input_allocate_device(&client->dev);
+ in_dev = devm_input_allocate_device(dev);
if (!in_dev) {
- dev_err(&client->dev, "device memory alloc failed\n");
+ dev_err(dev, "device memory alloc failed\n");
return -ENOMEM;
}
ts->in_dev = in_dev;
@@ -444,8 +445,8 @@ static int bu21013_probe(struct i2c_client *client)
in_dev->name = DRIVER_TP;
in_dev->id.bustype = BUS_I2C;
- device_property_read_u32(&client->dev, "rohm,touch-max-x", &max_x);
- device_property_read_u32(&client->dev, "rohm,touch-max-y", &max_y);
+ device_property_read_u32(dev, "rohm,touch-max-x", &max_x);
+ device_property_read_u32(dev, "rohm,touch-max-y", &max_y);
input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0, max_x, 0, 0);
input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
@@ -454,14 +455,14 @@ static int bu21013_probe(struct i2c_client *client)
/* Adjust for the legacy "flip" properties, if present */
if (!ts->props.invert_x &&
- device_property_read_bool(&client->dev, "rohm,flip-x")) {
+ device_property_read_bool(dev, "rohm,flip-x")) {
info = &in_dev->absinfo[ABS_MT_POSITION_X];
info->maximum -= info->minimum;
info->minimum = 0;
}
if (!ts->props.invert_y &&
- device_property_read_bool(&client->dev, "rohm,flip-y")) {
+ device_property_read_bool(dev, "rohm,flip-y")) {
info = &in_dev->absinfo[ABS_MT_POSITION_Y];
info->maximum -= info->minimum;
info->minimum = 0;
@@ -471,55 +472,46 @@ static int bu21013_probe(struct i2c_client *client)
INPUT_MT_DIRECT | INPUT_MT_TRACK |
INPUT_MT_DROP_UNUSED);
if (error) {
- dev_err(&client->dev, "failed to initialize MT slots");
+ dev_err(dev, "failed to initialize MT slots");
return error;
}
- ts->regulator = devm_regulator_get(&client->dev, "avdd");
+ ts->regulator = devm_regulator_get(dev, "avdd");
if (IS_ERR(ts->regulator)) {
- dev_err(&client->dev, "regulator_get failed\n");
+ dev_err(dev, "regulator_get failed\n");
return PTR_ERR(ts->regulator);
}
error = regulator_enable(ts->regulator);
if (error) {
- dev_err(&client->dev, "regulator enable failed\n");
+ dev_err(dev, "regulator enable failed\n");
return error;
}
- error = devm_add_action_or_reset(&client->dev, bu21013_power_off, ts);
+ error = devm_add_action_or_reset(dev, bu21013_power_off, ts);
if (error) {
- dev_err(&client->dev, "failed to install power off handler\n");
+ dev_err(dev, "failed to install power off handler\n");
return error;
}
/* Named "CS" on the chip, DT binding is "reset" */
- ts->cs_gpiod = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
- error = PTR_ERR_OR_ZERO(ts->cs_gpiod);
- if (error) {
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev, "failed to get CS GPIO\n");
- return error;
- }
+ ts->cs_gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->cs_gpiod))
+ return dev_err_probe(dev, PTR_ERR(ts->cs_gpiod), "failed to get CS GPIO\n");
+
gpiod_set_consumer_name(ts->cs_gpiod, "BU21013 CS");
- error = devm_add_action_or_reset(&client->dev,
- bu21013_disable_chip, ts);
+ error = devm_add_action_or_reset(dev, bu21013_disable_chip, ts);
if (error) {
- dev_err(&client->dev,
- "failed to install chip disable handler\n");
+ dev_err(dev, "failed to install chip disable handler\n");
return error;
}
/* Named "INT" on the chip, DT binding is "touch" */
- ts->int_gpiod = devm_gpiod_get_optional(&client->dev,
- "touch", GPIOD_IN);
+ ts->int_gpiod = devm_gpiod_get_optional(dev, "touch", GPIOD_IN);
error = PTR_ERR_OR_ZERO(ts->int_gpiod);
- if (error) {
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev, "failed to get INT GPIO\n");
- return error;
- }
+ if (error)
+ return dev_err_probe(dev, error, "failed to get INT GPIO\n");
if (ts->int_gpiod)
gpiod_set_consumer_name(ts->int_gpiod, "BU21013 INT");
@@ -527,22 +519,20 @@ static int bu21013_probe(struct i2c_client *client)
/* configure the touch panel controller */
error = bu21013_init_chip(ts);
if (error) {
- dev_err(&client->dev, "error in bu21013 config\n");
+ dev_err(dev, "error in bu21013 config\n");
return error;
}
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, bu21013_gpio_irq,
+ error = devm_request_threaded_irq(dev, client->irq, NULL, bu21013_gpio_irq,
IRQF_ONESHOT, DRIVER_TP, ts);
if (error) {
- dev_err(&client->dev, "request irq %d failed\n",
- client->irq);
+ dev_err(dev, "request irq %d failed\n", client->irq);
return error;
}
error = input_register_device(in_dev);
if (error) {
- dev_err(&client->dev, "failed to register input device\n");
+ dev_err(dev, "failed to register input device\n");
return error;
}
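This conversion (repeated in the probe paths below) replaces the open-coded "silence -EPROBE_DEFER, print otherwise" blocks with dev_err_probe(), which logs only for real errors, records the reason when probing is deferred, and returns the error code it was handed. A minimal sketch of the idiom, assuming a hypothetical devm-managed "vdd" supply:

	data->vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(data->vdd))
		return dev_err_probe(dev, PTR_ERR(data->vdd),
				     "failed to get 'vdd' supply\n");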
diff --git a/drivers/input/touchscreen/bu21029_ts.c b/drivers/input/touchscreen/bu21029_ts.c
index c8126d2efe95..e1dfbd92ab64 100644
--- a/drivers/input/touchscreen/bu21029_ts.c
+++ b/drivers/input/touchscreen/bu21029_ts.c
@@ -333,6 +333,7 @@ static void bu21029_stop_chip(struct input_dev *dev)
static int bu21029_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct bu21029_ts_data *bu21029;
struct input_dev *in_dev;
int error;
@@ -341,45 +342,33 @@ static int bu21029_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_WRITE_BYTE |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
- dev_err(&client->dev,
- "i2c functionality support is not sufficient\n");
+ dev_err(dev, "i2c functionality support is not sufficient\n");
return -EIO;
}
- bu21029 = devm_kzalloc(&client->dev, sizeof(*bu21029), GFP_KERNEL);
+ bu21029 = devm_kzalloc(dev, sizeof(*bu21029), GFP_KERNEL);
if (!bu21029)
return -ENOMEM;
- error = device_property_read_u32(&client->dev, "rohm,x-plate-ohms",
- &bu21029->x_plate_ohms);
+ error = device_property_read_u32(dev, "rohm,x-plate-ohms", &bu21029->x_plate_ohms);
if (error) {
- dev_err(&client->dev,
- "invalid 'x-plate-ohms' supplied: %d\n", error);
+ dev_err(dev, "invalid 'x-plate-ohms' supplied: %d\n", error);
return error;
}
- bu21029->vdd = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR(bu21029->vdd)) {
- error = PTR_ERR(bu21029->vdd);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "failed to acquire 'vdd' supply: %d\n", error);
- return error;
- }
+ bu21029->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(bu21029->vdd))
+ return dev_err_probe(dev, PTR_ERR(bu21029->vdd),
+ "failed to acquire 'vdd' supply\n");
- bu21029->reset_gpios = devm_gpiod_get_optional(&client->dev,
- "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(bu21029->reset_gpios)) {
- error = PTR_ERR(bu21029->reset_gpios);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "failed to acquire 'reset' gpio: %d\n", error);
- return error;
- }
+ bu21029->reset_gpios = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(bu21029->reset_gpios))
+ return dev_err_probe(dev, PTR_ERR(bu21029->reset_gpios),
+ "failed to acquire 'reset' gpio\n");
- in_dev = devm_input_allocate_device(&client->dev);
+ in_dev = devm_input_allocate_device(dev);
if (!in_dev) {
- dev_err(&client->dev, "unable to allocate input device\n");
+ dev_err(dev, "unable to allocate input device\n");
return -ENOMEM;
}
@@ -400,20 +389,18 @@ static int bu21029_probe(struct i2c_client *client)
input_set_drvdata(in_dev, bu21029);
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, bu21029_touch_soft_irq,
+ error = devm_request_threaded_irq(dev, client->irq, NULL,
+ bu21029_touch_soft_irq,
IRQF_ONESHOT | IRQF_NO_AUTOEN,
DRIVER_NAME, bu21029);
if (error) {
- dev_err(&client->dev,
- "unable to request touch irq: %d\n", error);
+ dev_err(dev, "unable to request touch irq: %d\n", error);
return error;
}
error = input_register_device(in_dev);
if (error) {
- dev_err(&client->dev,
- "unable to register input device: %d\n", error);
+ dev_err(dev, "unable to register input device: %d\n", error);
return error;
}
diff --git a/drivers/input/touchscreen/chipone_icn8318.c b/drivers/input/touchscreen/chipone_icn8318.c
index 9fbeaf17f00b..d6876d10b252 100644
--- a/drivers/input/touchscreen/chipone_icn8318.c
+++ b/drivers/input/touchscreen/chipone_icn8318.c
@@ -191,12 +191,8 @@ static int icn8318_probe(struct i2c_client *client)
return -ENOMEM;
data->wake_gpio = devm_gpiod_get(dev, "wake", GPIOD_OUT_LOW);
- if (IS_ERR(data->wake_gpio)) {
- error = PTR_ERR(data->wake_gpio);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Error getting wake gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(data->wake_gpio))
+ return dev_err_probe(dev, PTR_ERR(data->wake_gpio), "Error getting wake gpio\n");
input = devm_input_allocate_device(dev);
if (!input)
diff --git a/drivers/input/touchscreen/cy8ctma140.c b/drivers/input/touchscreen/cy8ctma140.c
index 967ecde23e83..ea3895167b82 100644
--- a/drivers/input/touchscreen/cy8ctma140.c
+++ b/drivers/input/touchscreen/cy8ctma140.c
@@ -258,12 +258,8 @@ static int cy8ctma140_probe(struct i2c_client *client)
ts->regulators[1].supply = "vdd";
error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators),
ts->regulators);
- if (error) {
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get regulators %d\n",
- error);
- return error;
- }
+ if (error)
+ return dev_err_probe(dev, error, "Failed to get regulators\n");
error = cy8ctma140_power_up(ts);
if (error)
diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
index b461ded946fc..db5a885ecd72 100644
--- a/drivers/input/touchscreen/cyttsp5.c
+++ b/drivers/input/touchscreen/cyttsp5.c
@@ -18,8 +18,8 @@
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 795c7dad22bf..457d53337fbb 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1168,13 +1168,9 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client)
tsdata->max_support_points = chip_data->max_support_points;
tsdata->vcc = devm_regulator_get(&client->dev, "vcc");
- if (IS_ERR(tsdata->vcc)) {
- error = PTR_ERR(tsdata->vcc);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "failed to request regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(tsdata->vcc))
+ return dev_err_probe(&client->dev, PTR_ERR(tsdata->vcc),
+ "failed to request regulator\n");
tsdata->iovcc = devm_regulator_get(&client->dev, "iovcc");
if (IS_ERR(tsdata->iovcc)) {
diff --git a/drivers/input/touchscreen/ektf2127.c b/drivers/input/touchscreen/ektf2127.c
index fd8724a3c19f..cc3103b9cbfb 100644
--- a/drivers/input/touchscreen/ektf2127.c
+++ b/drivers/input/touchscreen/ektf2127.c
@@ -264,12 +264,8 @@ static int ektf2127_probe(struct i2c_client *client)
/* This requests the gpio *and* turns on the touchscreen controller */
ts->power_gpios = devm_gpiod_get(dev, "power", GPIOD_OUT_HIGH);
- if (IS_ERR(ts->power_gpios)) {
- error = PTR_ERR(ts->power_gpios);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Error getting power gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->power_gpios))
+ return dev_err_probe(dev, PTR_ERR(ts->power_gpios), "Error getting power gpio\n");
input = devm_input_allocate_device(dev);
if (!input)
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 2da1db64126d..a1af3de9f310 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1438,24 +1438,14 @@ static int elants_i2c_probe(struct i2c_client *client)
i2c_set_clientdata(client, ts);
ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
- if (IS_ERR(ts->vcc33)) {
- error = PTR_ERR(ts->vcc33);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get 'vcc33' regulator: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(ts->vcc33))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->vcc33),
+ "Failed to get 'vcc33' regulator\n");
ts->vccio = devm_regulator_get(&client->dev, "vccio");
- if (IS_ERR(ts->vccio)) {
- error = PTR_ERR(ts->vccio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get 'vccio' regulator: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(ts->vccio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->vccio),
+ "Failed to get 'vccio' regulator\n");
ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ts->reset_gpio)) {
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index 4af4c1e5d0da..4c0d99aae9e0 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -7,6 +7,7 @@
* minimal implementation based on egalax_ts.c and egalax_i2c.c
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -18,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/regulator/consumer.h>
#include <linux/sizes.h>
#include <linux/timer.h>
#include <asm/unaligned.h>
@@ -360,6 +362,12 @@ static int exc3000_probe(struct i2c_client *client)
if (IS_ERR(data->reset))
return PTR_ERR(data->reset);
+ /* For proper reset sequence, enable power while reset asserted */
+ error = devm_regulator_get_enable(&client->dev, "vdd");
+ if (error && error != -ENODEV)
+ return dev_err_probe(&client->dev, error,
+ "failed to request vdd regulator\n");
+
if (data->reset) {
msleep(EXC3000_RESET_MS);
gpiod_set_value_cansleep(data->reset, 0);
@@ -454,10 +462,19 @@ static const struct of_device_id exc3000_of_match[] = {
MODULE_DEVICE_TABLE(of, exc3000_of_match);
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id exc3000_acpi_match[] = {
+ { "EGA00001", .driver_data = (kernel_ulong_t)&exc3000_info[EETI_EXC80H60] },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, exc3000_acpi_match);
+#endif
+
static struct i2c_driver exc3000_driver = {
.driver = {
.name = "exc3000",
.of_match_table = of_match_ptr(exc3000_of_match),
+ .acpi_match_table = ACPI_PTR(exc3000_acpi_match),
},
.id_table = exc3000_id,
.probe = exc3000_probe,
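The new ACPI ID carries the same driver_data as the OF table, and of_match_ptr()/ACPI_PTR() compile their arguments away when CONFIG_OF or CONFIG_ACPI is disabled, which is why each table sits behind its own #ifdef. A hedged sketch of how a probe routine can pick up the matched per-device data independently of the firmware interface; device_get_match_data() handles both OF and ACPI driver_data, and the struct name here is assumed from the exc3000_info[] table referenced above:

	const struct eeti_dev_info *info;

	info = device_get_match_data(&client->dev);
	if (!info)
		return -ENODEV;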
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index f5aa240739f9..da9954d6df44 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -935,7 +935,6 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
*/
static int goodix_get_gpio_config(struct goodix_ts_data *ts)
{
- int error;
struct device *dev;
struct gpio_desc *gpiod;
bool added_acpi_mappings = false;
@@ -951,33 +950,20 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
ts->gpiod_rst_flags = GPIOD_IN;
ts->avdd28 = devm_regulator_get(dev, "AVDD28");
- if (IS_ERR(ts->avdd28)) {
- error = PTR_ERR(ts->avdd28);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get AVDD28 regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->avdd28))
+ return dev_err_probe(dev, PTR_ERR(ts->avdd28), "Failed to get AVDD28 regulator\n");
ts->vddio = devm_regulator_get(dev, "VDDIO");
- if (IS_ERR(ts->vddio)) {
- error = PTR_ERR(ts->vddio);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get VDDIO regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->vddio))
+ return dev_err_probe(dev, PTR_ERR(ts->vddio), "Failed to get VDDIO regulator\n");
retry_get_irq_gpio:
/* Get the interrupt GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME, GPIOD_IN);
- if (IS_ERR(gpiod)) {
- error = PTR_ERR(gpiod);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get %s GPIO: %d\n",
- GOODIX_GPIO_INT_NAME, error);
- return error;
- }
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get %s GPIO\n",
+ GOODIX_GPIO_INT_NAME);
+
if (!gpiod && has_acpi_companion(dev) && !added_acpi_mappings) {
added_acpi_mappings = true;
if (goodix_add_acpi_gpio_mappings(ts) == 0)
@@ -988,13 +974,9 @@ retry_get_irq_gpio:
/* Get the reset line GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
- if (IS_ERR(gpiod)) {
- error = PTR_ERR(gpiod);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get %s GPIO: %d\n",
- GOODIX_GPIO_RST_NAME, error);
- return error;
- }
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get %s GPIO\n",
+ GOODIX_GPIO_RST_NAME);
ts->gpiod_rst = gpiod;
@@ -1517,6 +1499,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
static const struct acpi_device_id goodix_acpi_match[] = {
{ "GDIX1001", 0 },
{ "GDIX1002", 0 },
+ { "GDX9110", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index f7cd773f7292..ad6828e4f2e2 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -8,8 +8,8 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index 0aa9d6492df8..b4768b66eb10 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -23,8 +23,8 @@
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/touchscreen/iqs7211.c b/drivers/input/touchscreen/iqs7211.c
new file mode 100644
index 000000000000..dc084f873762
--- /dev/null
+++ b/drivers/input/touchscreen/iqs7211.c
@@ -0,0 +1,2557 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Azoteq IQS7210A/7211A/E Trackpad/Touchscreen Controller
+ *
+ * Copyright (C) 2023 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define IQS7211_PROD_NUM 0x00
+
+#define IQS7211_EVENT_MASK_ALL GENMASK(14, 8)
+#define IQS7211_EVENT_MASK_ALP BIT(13)
+#define IQS7211_EVENT_MASK_BTN BIT(12)
+#define IQS7211_EVENT_MASK_ATI BIT(11)
+#define IQS7211_EVENT_MASK_MOVE BIT(10)
+#define IQS7211_EVENT_MASK_GSTR BIT(9)
+#define IQS7211_EVENT_MODE BIT(8)
+
+#define IQS7211_COMMS_ERROR 0xEEEE
+#define IQS7211_COMMS_RETRY_MS 50
+#define IQS7211_COMMS_SLEEP_US 100
+#define IQS7211_COMMS_TIMEOUT_US (100 * USEC_PER_MSEC)
+#define IQS7211_RESET_TIMEOUT_MS 150
+#define IQS7211_START_TIMEOUT_US (1 * USEC_PER_SEC)
+
+#define IQS7211_NUM_RETRIES 5
+#define IQS7211_NUM_CRX 8
+#define IQS7211_MAX_CTX 13
+
+#define IQS7211_MAX_CONTACTS 2
+#define IQS7211_MAX_CYCLES 21
+
+/*
+ * The following delay is used during instances that must wait for the open-
+ * drain RDY pin to settle. Its value is calculated as 5*R*C, where R and C
+ * represent typical datasheet values of 4.7k and 100 nF, respectively.
+ */
+#define iqs7211_irq_wait() usleep_range(2500, 2600)
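/*
 * For reference, plugging the quoted typical values into 5*R*C gives
 * 5 * 4.7 kOhm * 100 nF = 2.35 ms, so the 2500-2600 us range used by
 * iqs7211_irq_wait() leaves a small margin above the computed RC
 * settling time.
 */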
+
+enum iqs7211_dev_id {
+ IQS7210A,
+ IQS7211A,
+ IQS7211E,
+};
+
+enum iqs7211_comms_mode {
+ IQS7211_COMMS_MODE_WAIT,
+ IQS7211_COMMS_MODE_FREE,
+ IQS7211_COMMS_MODE_FORCE,
+};
+
+struct iqs7211_reg_field_desc {
+ struct list_head list;
+ u8 addr;
+ u16 mask;
+ u16 val;
+};
+
+enum iqs7211_reg_key_id {
+ IQS7211_REG_KEY_NONE,
+ IQS7211_REG_KEY_PROX,
+ IQS7211_REG_KEY_TOUCH,
+ IQS7211_REG_KEY_TAP,
+ IQS7211_REG_KEY_HOLD,
+ IQS7211_REG_KEY_PALM,
+ IQS7211_REG_KEY_AXIAL_X,
+ IQS7211_REG_KEY_AXIAL_Y,
+ IQS7211_REG_KEY_RESERVED
+};
+
+enum iqs7211_reg_grp_id {
+ IQS7211_REG_GRP_TP,
+ IQS7211_REG_GRP_BTN,
+ IQS7211_REG_GRP_ALP,
+ IQS7211_REG_GRP_SYS,
+ IQS7211_NUM_REG_GRPS
+};
+
+static const char * const iqs7211_reg_grp_names[IQS7211_NUM_REG_GRPS] = {
+ [IQS7211_REG_GRP_TP] = "trackpad",
+ [IQS7211_REG_GRP_BTN] = "button",
+ [IQS7211_REG_GRP_ALP] = "alp",
+};
+
+static const u16 iqs7211_reg_grp_masks[IQS7211_NUM_REG_GRPS] = {
+ [IQS7211_REG_GRP_TP] = IQS7211_EVENT_MASK_GSTR,
+ [IQS7211_REG_GRP_BTN] = IQS7211_EVENT_MASK_BTN,
+ [IQS7211_REG_GRP_ALP] = IQS7211_EVENT_MASK_ALP,
+};
+
+struct iqs7211_event_desc {
+ const char *name;
+ u16 mask;
+ u16 enable;
+ enum iqs7211_reg_grp_id reg_grp;
+ enum iqs7211_reg_key_id reg_key;
+};
+
+static const struct iqs7211_event_desc iqs7210a_kp_events[] = {
+ {
+ .mask = BIT(10),
+ .enable = BIT(13) | BIT(12),
+ .reg_grp = IQS7211_REG_GRP_ALP,
+ },
+ {
+ .name = "event-prox",
+ .mask = BIT(2),
+ .enable = BIT(5) | BIT(4),
+ .reg_grp = IQS7211_REG_GRP_BTN,
+ .reg_key = IQS7211_REG_KEY_PROX,
+ },
+ {
+ .name = "event-touch",
+ .mask = BIT(3),
+ .enable = BIT(5) | BIT(4),
+ .reg_grp = IQS7211_REG_GRP_BTN,
+ .reg_key = IQS7211_REG_KEY_TOUCH,
+ },
+ {
+ .name = "event-tap",
+ .mask = BIT(0),
+ .enable = BIT(0),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-hold",
+ .mask = BIT(1),
+ .enable = BIT(1),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-x-neg",
+ .mask = BIT(2),
+ .enable = BIT(2),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-x-pos",
+ .mask = BIT(3),
+ .enable = BIT(3),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-y-pos",
+ .mask = BIT(4),
+ .enable = BIT(4),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+ {
+ .name = "event-swipe-y-neg",
+ .mask = BIT(5),
+ .enable = BIT(5),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+};
+
+static const struct iqs7211_event_desc iqs7211a_kp_events[] = {
+ {
+ .mask = BIT(14),
+ .reg_grp = IQS7211_REG_GRP_ALP,
+ },
+ {
+ .name = "event-tap",
+ .mask = BIT(0),
+ .enable = BIT(0),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-hold",
+ .mask = BIT(1),
+ .enable = BIT(1),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-x-neg",
+ .mask = BIT(2),
+ .enable = BIT(2),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-x-pos",
+ .mask = BIT(3),
+ .enable = BIT(3),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-y-pos",
+ .mask = BIT(4),
+ .enable = BIT(4),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+ {
+ .name = "event-swipe-y-neg",
+ .mask = BIT(5),
+ .enable = BIT(5),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+};
+
+static const struct iqs7211_event_desc iqs7211e_kp_events[] = {
+ {
+ .mask = BIT(14),
+ .reg_grp = IQS7211_REG_GRP_ALP,
+ },
+ {
+ .name = "event-tap",
+ .mask = BIT(0),
+ .enable = BIT(0),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-tap-double",
+ .mask = BIT(1),
+ .enable = BIT(1),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-tap-triple",
+ .mask = BIT(2),
+ .enable = BIT(2),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-hold",
+ .mask = BIT(3),
+ .enable = BIT(3),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-palm",
+ .mask = BIT(4),
+ .enable = BIT(4),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_PALM,
+ },
+ {
+ .name = "event-swipe-x-pos",
+ .mask = BIT(8),
+ .enable = BIT(8),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-x-neg",
+ .mask = BIT(9),
+ .enable = BIT(9),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-y-pos",
+ .mask = BIT(10),
+ .enable = BIT(10),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+ {
+ .name = "event-swipe-y-neg",
+ .mask = BIT(11),
+ .enable = BIT(11),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+ {
+ .name = "event-swipe-x-pos-hold",
+ .mask = BIT(12),
+ .enable = BIT(12),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-x-neg-hold",
+ .mask = BIT(13),
+ .enable = BIT(13),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-y-pos-hold",
+ .mask = BIT(14),
+ .enable = BIT(14),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-y-neg-hold",
+ .mask = BIT(15),
+ .enable = BIT(15),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+};
+
+struct iqs7211_dev_desc {
+ const char *tp_name;
+ const char *kp_name;
+ u16 prod_num;
+ u16 show_reset;
+ u16 ati_error[IQS7211_NUM_REG_GRPS];
+ u16 ati_start[IQS7211_NUM_REG_GRPS];
+ u16 suspend;
+ u16 ack_reset;
+ u16 comms_end;
+ u16 comms_req;
+ int charge_shift;
+ int info_offs;
+ int gesture_offs;
+ int contact_offs;
+ u8 sys_stat;
+ u8 sys_ctrl;
+ u8 alp_config;
+ u8 tp_config;
+ u8 exp_file;
+ u8 kp_enable[IQS7211_NUM_REG_GRPS];
+ u8 gesture_angle;
+ u8 rx_tx_map;
+ u8 cycle_alloc[2];
+ u8 cycle_limit[2];
+ const struct iqs7211_event_desc *kp_events;
+ int num_kp_events;
+ int min_crx_alp;
+ int num_ctx;
+};
+
+static const struct iqs7211_dev_desc iqs7211_devs[] = {
+ [IQS7210A] = {
+ .tp_name = "iqs7210a_trackpad",
+ .kp_name = "iqs7210a_keys",
+ .prod_num = 944,
+ .show_reset = BIT(15),
+ .ati_error = {
+ [IQS7211_REG_GRP_TP] = BIT(12),
+ [IQS7211_REG_GRP_BTN] = BIT(0),
+ [IQS7211_REG_GRP_ALP] = BIT(8),
+ },
+ .ati_start = {
+ [IQS7211_REG_GRP_TP] = BIT(13),
+ [IQS7211_REG_GRP_BTN] = BIT(1),
+ [IQS7211_REG_GRP_ALP] = BIT(9),
+ },
+ .suspend = BIT(11),
+ .ack_reset = BIT(7),
+ .comms_end = BIT(2),
+ .comms_req = BIT(1),
+ .charge_shift = 4,
+ .info_offs = 0,
+ .gesture_offs = 1,
+ .contact_offs = 4,
+ .sys_stat = 0x0A,
+ .sys_ctrl = 0x35,
+ .alp_config = 0x39,
+ .tp_config = 0x4E,
+ .exp_file = 0x57,
+ .kp_enable = {
+ [IQS7211_REG_GRP_TP] = 0x58,
+ [IQS7211_REG_GRP_BTN] = 0x37,
+ [IQS7211_REG_GRP_ALP] = 0x37,
+ },
+ .gesture_angle = 0x5F,
+ .rx_tx_map = 0x60,
+ .cycle_alloc = { 0x66, 0x75, },
+ .cycle_limit = { 10, 6, },
+ .kp_events = iqs7210a_kp_events,
+ .num_kp_events = ARRAY_SIZE(iqs7210a_kp_events),
+ .min_crx_alp = 4,
+ .num_ctx = IQS7211_MAX_CTX - 1,
+ },
+ [IQS7211A] = {
+ .tp_name = "iqs7211a_trackpad",
+ .kp_name = "iqs7211a_keys",
+ .prod_num = 763,
+ .show_reset = BIT(7),
+ .ati_error = {
+ [IQS7211_REG_GRP_TP] = BIT(3),
+ [IQS7211_REG_GRP_ALP] = BIT(5),
+ },
+ .ati_start = {
+ [IQS7211_REG_GRP_TP] = BIT(5),
+ [IQS7211_REG_GRP_ALP] = BIT(6),
+ },
+ .ack_reset = BIT(7),
+ .comms_req = BIT(4),
+ .charge_shift = 0,
+ .info_offs = 0,
+ .gesture_offs = 1,
+ .contact_offs = 4,
+ .sys_stat = 0x10,
+ .sys_ctrl = 0x50,
+ .tp_config = 0x60,
+ .alp_config = 0x72,
+ .exp_file = 0x74,
+ .kp_enable = {
+ [IQS7211_REG_GRP_TP] = 0x80,
+ },
+ .gesture_angle = 0x87,
+ .rx_tx_map = 0x90,
+ .cycle_alloc = { 0xA0, 0xB0, },
+ .cycle_limit = { 10, 8, },
+ .kp_events = iqs7211a_kp_events,
+ .num_kp_events = ARRAY_SIZE(iqs7211a_kp_events),
+ .num_ctx = IQS7211_MAX_CTX - 1,
+ },
+ [IQS7211E] = {
+ .tp_name = "iqs7211e_trackpad",
+ .kp_name = "iqs7211e_keys",
+ .prod_num = 1112,
+ .show_reset = BIT(7),
+ .ati_error = {
+ [IQS7211_REG_GRP_TP] = BIT(3),
+ [IQS7211_REG_GRP_ALP] = BIT(5),
+ },
+ .ati_start = {
+ [IQS7211_REG_GRP_TP] = BIT(5),
+ [IQS7211_REG_GRP_ALP] = BIT(6),
+ },
+ .suspend = BIT(11),
+ .ack_reset = BIT(7),
+ .comms_end = BIT(6),
+ .comms_req = BIT(4),
+ .charge_shift = 0,
+ .info_offs = 1,
+ .gesture_offs = 0,
+ .contact_offs = 2,
+ .sys_stat = 0x0E,
+ .sys_ctrl = 0x33,
+ .tp_config = 0x41,
+ .alp_config = 0x36,
+ .exp_file = 0x4A,
+ .kp_enable = {
+ [IQS7211_REG_GRP_TP] = 0x4B,
+ },
+ .gesture_angle = 0x55,
+ .rx_tx_map = 0x56,
+ .cycle_alloc = { 0x5D, 0x6C, },
+ .cycle_limit = { 10, 11, },
+ .kp_events = iqs7211e_kp_events,
+ .num_kp_events = ARRAY_SIZE(iqs7211e_kp_events),
+ .num_ctx = IQS7211_MAX_CTX,
+ },
+};
+
+struct iqs7211_prop_desc {
+ const char *name;
+ enum iqs7211_reg_key_id reg_key;
+ u8 reg_addr[IQS7211_NUM_REG_GRPS][ARRAY_SIZE(iqs7211_devs)];
+ int reg_shift;
+ int reg_width;
+ int val_pitch;
+ int val_min;
+ int val_max;
+ const char *label;
+};
+
+static const struct iqs7211_prop_desc iqs7211_props[] = {
+ {
+ .name = "azoteq,ati-frac-div-fine",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x1E,
+ [IQS7211A] = 0x30,
+ [IQS7211E] = 0x21,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x22,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x23,
+ [IQS7211A] = 0x36,
+ [IQS7211E] = 0x25,
+ },
+ },
+ .reg_shift = 9,
+ .reg_width = 5,
+ .label = "ATI fine fractional divider",
+ },
+ {
+ .name = "azoteq,ati-frac-mult-coarse",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x1E,
+ [IQS7211A] = 0x30,
+ [IQS7211E] = 0x21,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x22,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x23,
+ [IQS7211A] = 0x36,
+ [IQS7211E] = 0x25,
+ },
+ },
+ .reg_shift = 5,
+ .reg_width = 4,
+ .label = "ATI coarse fractional multiplier",
+ },
+ {
+ .name = "azoteq,ati-frac-div-coarse",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x1E,
+ [IQS7211A] = 0x30,
+ [IQS7211E] = 0x21,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x22,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x23,
+ [IQS7211A] = 0x36,
+ [IQS7211E] = 0x25,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 5,
+ .label = "ATI coarse fractional divider",
+ },
+ {
+ .name = "azoteq,ati-comp-div",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x1F,
+ [IQS7211E] = 0x22,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x24,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7211E] = 0x26,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .val_max = 31,
+ .label = "ATI compensation divider",
+ },
+ {
+ .name = "azoteq,ati-comp-div",
+ .reg_addr = {
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x24,
+ },
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_max = 31,
+ .label = "ATI compensation divider",
+ },
+ {
+ .name = "azoteq,ati-comp-div",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7211A] = 0x31,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7211A] = 0x37,
+ },
+ },
+ .val_max = 31,
+ .label = "ATI compensation divider",
+ },
+ {
+ .name = "azoteq,ati-target",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x20,
+ [IQS7211A] = 0x32,
+ [IQS7211E] = 0x23,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x27,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x28,
+ [IQS7211A] = 0x38,
+ [IQS7211E] = 0x27,
+ },
+ },
+ .label = "ATI target",
+ },
+ {
+ .name = "azoteq,ati-base",
+ .reg_addr[IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x26,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 8,
+ .label = "ATI base",
+ },
+ {
+ .name = "azoteq,ati-base",
+ .reg_addr[IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x26,
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .val_pitch = 8,
+ .label = "ATI base",
+ },
+ {
+ .name = "azoteq,rate-active-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x29,
+ [IQS7211A] = 0x40,
+ [IQS7211E] = 0x28,
+ },
+ .label = "active mode report rate",
+ },
+ {
+ .name = "azoteq,rate-touch-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2A,
+ [IQS7211A] = 0x41,
+ [IQS7211E] = 0x29,
+ },
+ .label = "idle-touch mode report rate",
+ },
+ {
+ .name = "azoteq,rate-idle-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2B,
+ [IQS7211A] = 0x42,
+ [IQS7211E] = 0x2A,
+ },
+ .label = "idle mode report rate",
+ },
+ {
+ .name = "azoteq,rate-lp1-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2C,
+ [IQS7211A] = 0x43,
+ [IQS7211E] = 0x2B,
+ },
+ .label = "low-power mode 1 report rate",
+ },
+ {
+ .name = "azoteq,rate-lp2-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2D,
+ [IQS7211A] = 0x44,
+ [IQS7211E] = 0x2C,
+ },
+ .label = "low-power mode 2 report rate",
+ },
+ {
+ .name = "azoteq,timeout-active-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2E,
+ [IQS7211A] = 0x45,
+ [IQS7211E] = 0x2D,
+ },
+ .val_pitch = 1000,
+ .label = "active mode timeout",
+ },
+ {
+ .name = "azoteq,timeout-touch-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2F,
+ [IQS7211A] = 0x46,
+ [IQS7211E] = 0x2E,
+ },
+ .val_pitch = 1000,
+ .label = "idle-touch mode timeout",
+ },
+ {
+ .name = "azoteq,timeout-idle-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x30,
+ [IQS7211A] = 0x47,
+ [IQS7211E] = 0x2F,
+ },
+ .val_pitch = 1000,
+ .label = "idle mode timeout",
+ },
+ {
+ .name = "azoteq,timeout-lp1-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x31,
+ [IQS7211A] = 0x48,
+ [IQS7211E] = 0x30,
+ },
+ .val_pitch = 1000,
+ .label = "low-power mode 1 timeout",
+ },
+ {
+ .name = "azoteq,timeout-lp2-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x32,
+ [IQS7211E] = 0x31,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "trackpad reference value update rate",
+ },
+ {
+ .name = "azoteq,timeout-lp2-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7211A] = 0x49,
+ },
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "trackpad reference value update rate",
+ },
+ {
+ .name = "azoteq,timeout-ati-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x32,
+ [IQS7211E] = 0x31,
+ },
+ .reg_width = 8,
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "ATI error timeout",
+ },
+ {
+ .name = "azoteq,timeout-ati-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7211A] = 0x35,
+ },
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "ATI error timeout",
+ },
+ {
+ .name = "azoteq,timeout-comms-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x33,
+ [IQS7211A] = 0x4A,
+ [IQS7211E] = 0x32,
+ },
+ .label = "communication timeout",
+ },
+ {
+ .name = "azoteq,timeout-press-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x34,
+ },
+ .reg_width = 8,
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "press timeout",
+ },
+ {
+ .name = "azoteq,ati-mode",
+ .reg_addr[IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x37,
+ },
+ .reg_shift = 15,
+ .reg_width = 1,
+ .label = "ATI mode",
+ },
+ {
+ .name = "azoteq,ati-mode",
+ .reg_addr[IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x37,
+ },
+ .reg_shift = 7,
+ .reg_width = 1,
+ .label = "ATI mode",
+ },
+ {
+ .name = "azoteq,sense-mode",
+ .reg_addr[IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x37,
+ [IQS7211A] = 0x72,
+ [IQS7211E] = 0x36,
+ },
+ .reg_shift = 8,
+ .reg_width = 1,
+ .label = "sensing mode",
+ },
+ {
+ .name = "azoteq,sense-mode",
+ .reg_addr[IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x37,
+ },
+ .reg_shift = 0,
+ .reg_width = 2,
+ .val_max = 2,
+ .label = "sensing mode",
+ },
+ {
+ .name = "azoteq,fosc-freq",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x38,
+ [IQS7211A] = 0x52,
+ [IQS7211E] = 0x35,
+ },
+ .reg_shift = 4,
+ .reg_width = 1,
+ .label = "core clock frequency selection",
+ },
+ {
+ .name = "azoteq,fosc-trim",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x38,
+ [IQS7211A] = 0x52,
+ [IQS7211E] = 0x35,
+ },
+ .reg_shift = 0,
+ .reg_width = 4,
+ .label = "core clock frequency trim",
+ },
+ {
+ .name = "azoteq,touch-exit",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x3B,
+ [IQS7211A] = 0x53,
+ [IQS7211E] = 0x38,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3E,
+ },
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "touch exit factor",
+ },
+ {
+ .name = "azoteq,touch-enter",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x3B,
+ [IQS7211A] = 0x53,
+ [IQS7211E] = 0x38,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3E,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "touch entrance factor",
+ },
+ {
+ .name = "azoteq,thresh",
+ .reg_addr = {
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3C,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x3D,
+ [IQS7211A] = 0x54,
+ [IQS7211E] = 0x39,
+ },
+ },
+ .label = "threshold",
+ },
+ {
+ .name = "azoteq,debounce-exit",
+ .reg_addr = {
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3F,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x40,
+ [IQS7211A] = 0x56,
+ [IQS7211E] = 0x3A,
+ },
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "debounce exit factor",
+ },
+ {
+ .name = "azoteq,debounce-enter",
+ .reg_addr = {
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3F,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x40,
+ [IQS7211A] = 0x56,
+ [IQS7211E] = 0x3A,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "debounce entrance factor",
+ },
+ {
+ .name = "azoteq,conv-frac",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x48,
+ [IQS7211A] = 0x58,
+ [IQS7211E] = 0x3D,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x49,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x4A,
+ [IQS7211A] = 0x59,
+ [IQS7211E] = 0x3E,
+ },
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "conversion frequency fractional divider",
+ },
+ {
+ .name = "azoteq,conv-period",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x48,
+ [IQS7211A] = 0x58,
+ [IQS7211E] = 0x3D,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x49,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x4A,
+ [IQS7211A] = 0x59,
+ [IQS7211E] = 0x3E,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "conversion period",
+ },
+ {
+ .name = "azoteq,thresh",
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x55,
+ [IQS7211A] = 0x67,
+ [IQS7211E] = 0x48,
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "threshold",
+ },
+ {
+ .name = "azoteq,contact-split",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x55,
+ [IQS7211A] = 0x67,
+ [IQS7211E] = 0x48,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "contact split factor",
+ },
+ {
+ .name = "azoteq,trim-x",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x56,
+ [IQS7211E] = 0x49,
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "horizontal trim width",
+ },
+ {
+ .name = "azoteq,trim-x",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7211A] = 0x68,
+ },
+ .label = "horizontal trim width",
+ },
+ {
+ .name = "azoteq,trim-y",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x56,
+ [IQS7211E] = 0x49,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "vertical trim height",
+ },
+ {
+ .name = "azoteq,trim-y",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7211A] = 0x69,
+ },
+ .label = "vertical trim height",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_key = IQS7211_REG_KEY_TAP,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x59,
+ [IQS7211A] = 0x81,
+ [IQS7211E] = 0x4C,
+ },
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-mid-ms",
+ .reg_key = IQS7211_REG_KEY_TAP,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7211E] = 0x4D,
+ },
+ .label = "repeated gesture time",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_key = IQS7211_REG_KEY_TAP,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5A,
+ [IQS7211A] = 0x82,
+ [IQS7211E] = 0x4E,
+ },
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5A,
+ [IQS7211A] = 0x82,
+ [IQS7211E] = 0x4E,
+ },
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-min-ms",
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5B,
+ [IQS7211A] = 0x83,
+ [IQS7211E] = 0x4F,
+ },
+ .label = "minimum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5C,
+ [IQS7211A] = 0x84,
+ [IQS7211E] = 0x50,
+ },
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5C,
+ [IQS7211A] = 0x84,
+ [IQS7211E] = 0x50,
+ },
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5D,
+ [IQS7211A] = 0x85,
+ [IQS7211E] = 0x51,
+ },
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5E,
+ [IQS7211A] = 0x86,
+ [IQS7211E] = 0x52,
+ },
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist-rep",
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7211E] = 0x53,
+ },
+ .label = "repeated gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist-rep",
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7211E] = 0x54,
+ },
+ .label = "repeated gesture distance",
+ },
+ {
+ .name = "azoteq,thresh",
+ .reg_key = IQS7211_REG_KEY_PALM,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7211E] = 0x55,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_max = 42,
+ .label = "threshold",
+ },
+};
+
+static const u8 iqs7211_gesture_angle[] = {
+ 0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C,
+ 0x0E, 0x0F, 0x10, 0x11,
+ 0x12, 0x14, 0x15, 0x16,
+ 0x17, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1E, 0x1F, 0x21,
+ 0x22, 0x23, 0x25, 0x26,
+ 0x28, 0x2A, 0x2B, 0x2D,
+ 0x2E, 0x30, 0x32, 0x34,
+ 0x36, 0x38, 0x3A, 0x3C,
+ 0x3E, 0x40, 0x42, 0x45,
+ 0x47, 0x4A, 0x4C, 0x4F,
+ 0x52, 0x55, 0x58, 0x5B,
+ 0x5F, 0x63, 0x66, 0x6B,
+ 0x6F, 0x73, 0x78, 0x7E,
+ 0x83, 0x89, 0x90, 0x97,
+ 0x9E, 0xA7, 0xB0, 0xBA,
+ 0xC5, 0xD1, 0xDF, 0xEF,
+};
+
+struct iqs7211_ver_info {
+ __le16 prod_num;
+ __le16 major;
+ __le16 minor;
+ __le32 patch;
+} __packed;
+
+struct iqs7211_touch_data {
+ __le16 abs_x;
+ __le16 abs_y;
+ __le16 pressure;
+ __le16 area;
+} __packed;
+
+struct iqs7211_tp_config {
+ u8 tp_settings;
+ u8 total_rx;
+ u8 total_tx;
+ u8 num_contacts;
+ __le16 max_x;
+ __le16 max_y;
+} __packed;
+
+struct iqs7211_private {
+ const struct iqs7211_dev_desc *dev_desc;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *irq_gpio;
+ struct i2c_client *client;
+ struct input_dev *tp_idev;
+ struct input_dev *kp_idev;
+ struct iqs7211_ver_info ver_info;
+ struct iqs7211_tp_config tp_config;
+ struct touchscreen_properties prop;
+ struct list_head reg_field_head;
+ enum iqs7211_comms_mode comms_init;
+ enum iqs7211_comms_mode comms_mode;
+ unsigned int num_contacts;
+ unsigned int kp_code[ARRAY_SIZE(iqs7211e_kp_events)];
+ u8 rx_tx_map[IQS7211_MAX_CTX + 1];
+ u8 cycle_alloc[2][33];
+ u8 exp_file[2];
+ u16 event_mask;
+ u16 ati_start;
+ u16 gesture_cache;
+};
+
+static int iqs7211_irq_poll(struct iqs7211_private *iqs7211, u64 timeout_us)
+{
+ int error, val;
+
+ error = readx_poll_timeout(gpiod_get_value_cansleep, iqs7211->irq_gpio,
+ val, val, IQS7211_COMMS_SLEEP_US, timeout_us);
+
+ return val < 0 ? val : error;
+}
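/*
 * iqs7211_irq_poll() builds on readx_poll_timeout() from <linux/iopoll.h>:
 *
 *     readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us)
 *
 * repeatedly evaluates "val = op(addr)" with roughly sleep_us microseconds
 * between reads until "cond" is true or timeout_us elapses, returning 0 on
 * success and -ETIMEDOUT otherwise. Here the condition is simply "val", so
 * the poll also stops early if gpiod_get_value_cansleep() returns a negative
 * errno, which "val < 0 ? val : error" then propagates ahead of any timeout
 * result.
 */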
+
+static int iqs7211_hard_reset(struct iqs7211_private *iqs7211)
+{
+ if (!iqs7211->reset_gpio)
+ return 0;
+
+ gpiod_set_value_cansleep(iqs7211->reset_gpio, 1);
+
+ /*
+ * The following delay ensures the shared RDY/MCLR pin is sampled in
+ * between periodic assertions by the device and assumes the default
+ * communication timeout has not been overwritten in OTP memory.
+ */
+ if (iqs7211->reset_gpio == iqs7211->irq_gpio)
+ msleep(IQS7211_RESET_TIMEOUT_MS);
+ else
+ usleep_range(1000, 1100);
+
+ gpiod_set_value_cansleep(iqs7211->reset_gpio, 0);
+ if (iqs7211->reset_gpio == iqs7211->irq_gpio)
+ iqs7211_irq_wait();
+
+ return iqs7211_irq_poll(iqs7211, IQS7211_START_TIMEOUT_US);
+}
+
+static int iqs7211_force_comms(struct iqs7211_private *iqs7211)
+{
+ u8 msg_buf[] = { 0xFF, };
+ int ret;
+
+ switch (iqs7211->comms_mode) {
+ case IQS7211_COMMS_MODE_WAIT:
+ return iqs7211_irq_poll(iqs7211, IQS7211_START_TIMEOUT_US);
+
+ case IQS7211_COMMS_MODE_FREE:
+ return 0;
+
+ case IQS7211_COMMS_MODE_FORCE:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * The device cannot communicate until it asserts its interrupt (RDY)
+ * pin. Attempts to do so while RDY is deasserted return an ACK; how-
+ * ever all write data is ignored, and all read data returns 0xEE.
+ *
+ * Unsolicited communication must be preceded by a special force com-
+ * munication command, after which the device eventually asserts its
+ * RDY pin and agrees to communicate.
+ *
+ * Regardless of whether communication is forced or the result of an
+ * interrupt, the device automatically deasserts its RDY pin once it
+ * detects an I2C stop condition, or a timeout expires.
+ */
+ ret = gpiod_get_value_cansleep(iqs7211->irq_gpio);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ return 0;
+
+ ret = i2c_master_send(iqs7211->client, msg_buf, sizeof(msg_buf));
+ if (ret < (int)sizeof(msg_buf)) {
+ if (ret >= 0)
+ ret = -EIO;
+
+ msleep(IQS7211_COMMS_RETRY_MS);
+ return ret;
+ }
+
+ iqs7211_irq_wait();
+
+ return iqs7211_irq_poll(iqs7211, IQS7211_COMMS_TIMEOUT_US);
+}
+
+static int iqs7211_read_burst(struct iqs7211_private *iqs7211,
+ u8 reg, void *val, u16 val_len)
+{
+ int ret, i;
+ struct i2c_client *client = iqs7211->client;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(reg),
+ .buf = &reg,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = val_len,
+ .buf = (u8 *)val,
+ },
+ };
+
+ /*
+ * The following loop protects against an edge case in which the RDY
+ * pin is automatically deasserted just as the read is initiated. In
+ * that case, the read must be retried using forced communication.
+ */
+ for (i = 0; i < IQS7211_NUM_RETRIES; i++) {
+ ret = iqs7211_force_comms(iqs7211);
+ if (ret < 0)
+ continue;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < (int)ARRAY_SIZE(msg)) {
+ if (ret >= 0)
+ ret = -EIO;
+
+ msleep(IQS7211_COMMS_RETRY_MS);
+ continue;
+ }
+
+ if (get_unaligned_le16(msg[1].buf) == IQS7211_COMMS_ERROR) {
+ ret = -ENODATA;
+ continue;
+ }
+
+ ret = 0;
+ break;
+ }
+
+ iqs7211_irq_wait();
+
+ if (ret < 0)
+ dev_err(&client->dev,
+ "Failed to read from address 0x%02X: %d\n", reg, ret);
+
+ return ret;
+}
+
+static int iqs7211_read_word(struct iqs7211_private *iqs7211, u8 reg, u16 *val)
+{
+ __le16 val_buf;
+ int error;
+
+ error = iqs7211_read_burst(iqs7211, reg, &val_buf, sizeof(val_buf));
+ if (error)
+ return error;
+
+ *val = le16_to_cpu(val_buf);
+
+ return 0;
+}
+
+static int iqs7211_write_burst(struct iqs7211_private *iqs7211,
+ u8 reg, const void *val, u16 val_len)
+{
+ int msg_len = sizeof(reg) + val_len;
+ int ret, i;
+ struct i2c_client *client = iqs7211->client;
+ u8 *msg_buf;
+
+ msg_buf = kzalloc(msg_len, GFP_KERNEL);
+ if (!msg_buf)
+ return -ENOMEM;
+
+ *msg_buf = reg;
+ memcpy(msg_buf + sizeof(reg), val, val_len);
+
+ /*
+ * The following loop protects against an edge case in which the RDY
+ * pin is automatically asserted just before the force communication
+ * command is sent.
+ *
+ * In that case, the subsequent I2C stop condition tricks the device
+ * into preemptively deasserting the RDY pin and the command must be
+ * sent again.
+ */
+ for (i = 0; i < IQS7211_NUM_RETRIES; i++) {
+ ret = iqs7211_force_comms(iqs7211);
+ if (ret < 0)
+ continue;
+
+ ret = i2c_master_send(client, msg_buf, msg_len);
+ if (ret < msg_len) {
+ if (ret >= 0)
+ ret = -EIO;
+
+ msleep(IQS7211_COMMS_RETRY_MS);
+ continue;
+ }
+
+ ret = 0;
+ break;
+ }
+
+ kfree(msg_buf);
+
+ iqs7211_irq_wait();
+
+ if (ret < 0)
+ dev_err(&client->dev,
+ "Failed to write to address 0x%02X: %d\n", reg, ret);
+
+ return ret;
+}
+
+static int iqs7211_write_word(struct iqs7211_private *iqs7211, u8 reg, u16 val)
+{
+ __le16 val_buf = cpu_to_le16(val);
+
+ return iqs7211_write_burst(iqs7211, reg, &val_buf, sizeof(val_buf));
+}
+
+static int iqs7211_start_comms(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ bool forced_comms;
+ unsigned int val;
+ u16 comms_setup;
+ int error;
+
+ /*
+ * Until forced communication can be enabled, the host must wait for a
+ * communication window each time it intends to elicit a response from
+ * the device.
+ *
+ * Forced communication is not necessary, however, if the host adapter
+ * can support clock stretching. In that case, the device freely clock
+ * stretches until all pending conversions are complete.
+ */
+ forced_comms = device_property_present(&client->dev,
+ "azoteq,forced-comms");
+
+ error = device_property_read_u32(&client->dev,
+ "azoteq,forced-comms-default", &val);
+ if (error == -EINVAL) {
+ iqs7211->comms_init = IQS7211_COMMS_MODE_WAIT;
+ } else if (error) {
+ dev_err(&client->dev,
+ "Failed to read default communication mode: %d\n",
+ error);
+ return error;
+ } else if (val) {
+ iqs7211->comms_init = forced_comms ? IQS7211_COMMS_MODE_FORCE
+ : IQS7211_COMMS_MODE_WAIT;
+ } else {
+ iqs7211->comms_init = forced_comms ? IQS7211_COMMS_MODE_WAIT
+ : IQS7211_COMMS_MODE_FREE;
+ }
+
+ iqs7211->comms_mode = iqs7211->comms_init;
+
+ error = iqs7211_hard_reset(iqs7211);
+ if (error) {
+ dev_err(&client->dev, "Failed to reset device: %d\n", error);
+ return error;
+ }
+
+ error = iqs7211_read_burst(iqs7211, IQS7211_PROD_NUM,
+ &iqs7211->ver_info,
+ sizeof(iqs7211->ver_info));
+ if (error)
+ return error;
+
+ if (le16_to_cpu(iqs7211->ver_info.prod_num) != dev_desc->prod_num) {
+ dev_err(&client->dev, "Invalid product number: %u\n",
+ le16_to_cpu(iqs7211->ver_info.prod_num));
+ return -EINVAL;
+ }
+
+ error = iqs7211_read_word(iqs7211, dev_desc->sys_ctrl + 1,
+ &comms_setup);
+ if (error)
+ return error;
+
+ if (forced_comms)
+ comms_setup |= dev_desc->comms_req;
+ else
+ comms_setup &= ~dev_desc->comms_req;
+
+ error = iqs7211_write_word(iqs7211, dev_desc->sys_ctrl + 1,
+ comms_setup | dev_desc->comms_end);
+ if (error)
+ return error;
+
+ if (forced_comms)
+ iqs7211->comms_mode = IQS7211_COMMS_MODE_FORCE;
+ else
+ iqs7211->comms_mode = IQS7211_COMMS_MODE_FREE;
+
+ error = iqs7211_read_burst(iqs7211, dev_desc->exp_file,
+ iqs7211->exp_file,
+ sizeof(iqs7211->exp_file));
+ if (error)
+ return error;
+
+ error = iqs7211_read_burst(iqs7211, dev_desc->tp_config,
+ &iqs7211->tp_config,
+ sizeof(iqs7211->tp_config));
+ if (error)
+ return error;
+
+ error = iqs7211_write_word(iqs7211, dev_desc->sys_ctrl + 1,
+ comms_setup);
+ if (error)
+ return error;
+
+ iqs7211->event_mask = comms_setup & ~IQS7211_EVENT_MASK_ALL;
+ iqs7211->event_mask |= (IQS7211_EVENT_MASK_ATI | IQS7211_EVENT_MODE);
+
+ return 0;
+}
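/*
 * Summary of the initial communication mode chosen above, as a function of
 * the two optional properties:
 *
 *   "azoteq,forced-comms-default" absent             -> WAIT
 *   present, nonzero, "azoteq,forced-comms" present  -> FORCE
 *   present, nonzero, "azoteq,forced-comms" absent   -> WAIT
 *   present, zero,    "azoteq,forced-comms" present  -> WAIT
 *   present, zero,    "azoteq,forced-comms" absent   -> FREE
 *
 * Once the comms_req bit has been written back, the mode settles to FORCE
 * or FREE depending solely on whether "azoteq,forced-comms" is present.
 */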
+
+static int iqs7211_init_device(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct iqs7211_reg_field_desc *reg_field;
+ __le16 sys_ctrl[] = {
+ cpu_to_le16(dev_desc->ack_reset),
+ cpu_to_le16(iqs7211->event_mask),
+ };
+ int error, i;
+
+ /*
+ * Acknowledge reset before writing any registers in case the device
+ * suffers a spurious reset during initialization. The communication
+ * mode is configured at this time as well.
+ */
+ error = iqs7211_write_burst(iqs7211, dev_desc->sys_ctrl, sys_ctrl,
+ sizeof(sys_ctrl));
+ if (error)
+ return error;
+
+ if (iqs7211->event_mask & dev_desc->comms_req)
+ iqs7211->comms_mode = IQS7211_COMMS_MODE_FORCE;
+ else
+ iqs7211->comms_mode = IQS7211_COMMS_MODE_FREE;
+
+ /*
+ * Take advantage of the stop-bit disable function, if available, to
+ * save the trouble of having to reopen a communication window after
+ * each read or write.
+ */
+ error = iqs7211_write_word(iqs7211, dev_desc->sys_ctrl + 1,
+ iqs7211->event_mask | dev_desc->comms_end);
+ if (error)
+ return error;
+
+ list_for_each_entry(reg_field, &iqs7211->reg_field_head, list) {
+ u16 new_val = reg_field->val;
+
+ if (reg_field->mask < U16_MAX) {
+ u16 old_val;
+
+ error = iqs7211_read_word(iqs7211, reg_field->addr,
+ &old_val);
+ if (error)
+ return error;
+
+ new_val = old_val & ~reg_field->mask;
+ new_val |= reg_field->val;
+
+ if (new_val == old_val)
+ continue;
+ }
+
+ error = iqs7211_write_word(iqs7211, reg_field->addr, new_val);
+ if (error)
+ return error;
+ }
+
+ error = iqs7211_write_burst(iqs7211, dev_desc->tp_config,
+ &iqs7211->tp_config,
+ sizeof(iqs7211->tp_config));
+ if (error)
+ return error;
+
+ if (**iqs7211->cycle_alloc) {
+ error = iqs7211_write_burst(iqs7211, dev_desc->rx_tx_map,
+ &iqs7211->rx_tx_map,
+ dev_desc->num_ctx);
+ if (error)
+ return error;
+
+ for (i = 0; i < sizeof(dev_desc->cycle_limit); i++) {
+ error = iqs7211_write_burst(iqs7211,
+ dev_desc->cycle_alloc[i],
+ iqs7211->cycle_alloc[i],
+ dev_desc->cycle_limit[i] * 3);
+ if (error)
+ return error;
+ }
+ }
+
+ *sys_ctrl = cpu_to_le16(iqs7211->ati_start);
+
+ return iqs7211_write_burst(iqs7211, dev_desc->sys_ctrl, sys_ctrl,
+ sizeof(sys_ctrl));
+}
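/*
 * Worked example of the field merge above (the devicetree value is
 * hypothetical): "azoteq,fosc-trim" = 7 on an IQS7210A parses (see
 * iqs7211_parse_props() below) to addr = 0x38, mask = GENMASK(3, 0) and
 * val = 0x0007. Because the mask covers less than the full 16-bit register,
 * init reads the current word, computes new_val = (old_val & ~0x000F) |
 * 0x0007 and writes it back only if it changed; a field whose mask is
 * U16_MAX skips the read-modify-write and is written directly.
 */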
+
+static int iqs7211_add_field(struct iqs7211_private *iqs7211,
+ struct iqs7211_reg_field_desc new_field)
+{
+ struct i2c_client *client = iqs7211->client;
+ struct iqs7211_reg_field_desc *reg_field;
+
+ if (!new_field.addr)
+ return 0;
+
+ list_for_each_entry(reg_field, &iqs7211->reg_field_head, list) {
+ if (reg_field->addr != new_field.addr)
+ continue;
+
+ reg_field->mask |= new_field.mask;
+ reg_field->val |= new_field.val;
+ return 0;
+ }
+
+ reg_field = devm_kzalloc(&client->dev, sizeof(*reg_field), GFP_KERNEL);
+ if (!reg_field)
+ return -ENOMEM;
+
+ reg_field->addr = new_field.addr;
+ reg_field->mask = new_field.mask;
+ reg_field->val = new_field.val;
+
+ list_add(&reg_field->list, &iqs7211->reg_field_head);
+
+ return 0;
+}
+
+static int iqs7211_parse_props(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *reg_grp_node,
+ enum iqs7211_reg_grp_id reg_grp,
+ enum iqs7211_reg_key_id reg_key)
+{
+ struct i2c_client *client = iqs7211->client;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iqs7211_props); i++) {
+ const char *name = iqs7211_props[i].name;
+ u8 reg_addr = iqs7211_props[i].reg_addr[reg_grp]
+ [iqs7211->dev_desc -
+ iqs7211_devs];
+ int reg_shift = iqs7211_props[i].reg_shift;
+ int reg_width = iqs7211_props[i].reg_width ? : 16;
+ int val_pitch = iqs7211_props[i].val_pitch ? : 1;
+ int val_min = iqs7211_props[i].val_min;
+ int val_max = iqs7211_props[i].val_max;
+ const char *label = iqs7211_props[i].label ? : name;
+ struct iqs7211_reg_field_desc reg_field;
+ unsigned int val;
+ int error;
+
+ if (iqs7211_props[i].reg_key != reg_key)
+ continue;
+
+ if (!reg_addr)
+ continue;
+
+ error = fwnode_property_read_u32(reg_grp_node, name, &val);
+ if (error == -EINVAL) {
+ continue;
+ } else if (error) {
+ dev_err(&client->dev, "Failed to read %s %s: %d\n",
+ fwnode_get_name(reg_grp_node), label, error);
+ return error;
+ }
+
+ if (!val_max)
+ val_max = GENMASK(reg_width - 1, 0) * val_pitch;
+
+ if (val < val_min || val > val_max) {
+ dev_err(&client->dev, "Invalid %s: %u\n", label, val);
+ return -EINVAL;
+ }
+
+ reg_field.addr = reg_addr;
+ reg_field.mask = GENMASK(reg_shift + reg_width - 1, reg_shift);
+ reg_field.val = val / val_pitch << reg_shift;
+
+ error = iqs7211_add_field(iqs7211, reg_field);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
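/*
 * Example of the scaling performed above (the devicetree value is
 * hypothetical): "azoteq,timeout-press-ms" = 5000 on an IQS7210A has
 * reg_width = 8, val_pitch = 1000 and val_max = 60000, so the value passes
 * the range check and is stored as reg_field.val = 5000 / 1000 = 5 with
 * mask = GENMASK(7, 0); the upper byte of register 0x34 is left for the
 * read-modify-write in iqs7211_init_device() to preserve.
 */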
+
+static int iqs7211_parse_event(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *event_node,
+ enum iqs7211_reg_grp_id reg_grp,
+ enum iqs7211_reg_key_id reg_key,
+ unsigned int *event_code)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ struct iqs7211_reg_field_desc reg_field;
+ unsigned int val;
+ int error;
+
+ error = iqs7211_parse_props(iqs7211, event_node, reg_grp, reg_key);
+ if (error)
+ return error;
+
+ if (reg_key == IQS7211_REG_KEY_AXIAL_X ||
+ reg_key == IQS7211_REG_KEY_AXIAL_Y) {
+ error = fwnode_property_read_u32(event_node,
+ "azoteq,gesture-angle", &val);
+ if (!error) {
+ if (val >= ARRAY_SIZE(iqs7211_gesture_angle)) {
+ dev_err(&client->dev,
+ "Invalid %s gesture angle: %u\n",
+ fwnode_get_name(event_node), val);
+ return -EINVAL;
+ }
+
+ reg_field.addr = dev_desc->gesture_angle;
+ reg_field.mask = U8_MAX;
+ reg_field.val = iqs7211_gesture_angle[val];
+
+ error = iqs7211_add_field(iqs7211, reg_field);
+ if (error)
+ return error;
+ } else if (error != -EINVAL) {
+ dev_err(&client->dev,
+ "Failed to read %s gesture angle: %d\n",
+ fwnode_get_name(event_node), error);
+ return error;
+ }
+ }
+
+ error = fwnode_property_read_u32(event_node, "linux,code", event_code);
+ if (error == -EINVAL)
+ error = 0;
+ else if (error)
+ dev_err(&client->dev, "Failed to read %s code: %d\n",
+ fwnode_get_name(event_node), error);
+
+ return error;
+}
+
+static int iqs7211_parse_cycles(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *tp_node)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ int num_cycles = dev_desc->cycle_limit[0] + dev_desc->cycle_limit[1];
+ int error, count, i, j, k, cycle_start;
+ unsigned int cycle_alloc[IQS7211_MAX_CYCLES][2];
+ u8 total_rx = iqs7211->tp_config.total_rx;
+ u8 total_tx = iqs7211->tp_config.total_tx;
+
+ for (i = 0; i < IQS7211_MAX_CYCLES * 2; i++)
+ *(cycle_alloc[0] + i) = U8_MAX;
+
+ count = fwnode_property_count_u32(tp_node, "azoteq,channel-select");
+ if (count == -EINVAL) {
+ /*
+ * Assign each sensing cycle's slots (0 and 1) to a channel,
+ * defined as the intersection of a CRx pin and a CTx pin.
+ * defined as the intersection of a CRx pin and a CTx pin.
+ * A channel assignment of 255 means the slot is unused.
+ */
+ for (i = 0, cycle_start = 0; i < total_tx; i++) {
+ int cycle_stop = 0;
+
+ for (j = 0; j < total_rx; j++) {
+ /*
+ * Channels formed by CRx0-3 and CRx4-7 are
+ * bound to slots 0 and 1, respectively.
+ */
+ int slot = iqs7211->rx_tx_map[j] < 4 ? 0 : 1;
+ int chan = i * total_rx + j;
+
+ for (k = cycle_start; k < num_cycles; k++) {
+ if (cycle_alloc[k][slot] < U8_MAX)
+ continue;
+
+ cycle_alloc[k][slot] = chan;
+ break;
+ }
+
+ if (k < num_cycles) {
+ cycle_stop = max(k, cycle_stop);
+ continue;
+ }
+
+ dev_err(&client->dev,
+ "Insufficient number of cycles\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Sensing cycles cannot straddle more than one CTx
+ * pin. As such, the next row's starting cycle must
+ * be greater than the previous row's highest cycle.
+ */
+ cycle_start = cycle_stop + 1;
+ }
+ } else if (count < 0) {
+ dev_err(&client->dev, "Failed to count channels: %d\n", count);
+ return count;
+ } else if (count > num_cycles * 2) {
+ dev_err(&client->dev, "Insufficient number of cycles\n");
+ return -EINVAL;
+ } else if (count > 0) {
+ error = fwnode_property_read_u32_array(tp_node,
+ "azoteq,channel-select",
+ cycle_alloc[0], count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read channels: %d\n",
+ error);
+ return error;
+ }
+
+ for (i = 0; i < count; i++) {
+ int chan = *(cycle_alloc[0] + i);
+
+ if (chan == U8_MAX)
+ continue;
+
+ if (chan >= total_rx * total_tx) {
+ dev_err(&client->dev, "Invalid channel: %d\n",
+ chan);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < count; j++) {
+ if (j == i || *(cycle_alloc[0] + j) != chan)
+ continue;
+
+ dev_err(&client->dev, "Duplicate channel: %d\n",
+ chan);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /*
+ * Once the raw channel assignments have been derived, they must be
+ * packed according to the device's register map.
+ */
+ for (i = 0, cycle_start = 0; i < sizeof(dev_desc->cycle_limit); i++) {
+ int offs = 0;
+
+ for (j = cycle_start;
+ j < cycle_start + dev_desc->cycle_limit[i]; j++) {
+ iqs7211->cycle_alloc[i][offs++] = 0x05;
+ iqs7211->cycle_alloc[i][offs++] = cycle_alloc[j][0];
+ iqs7211->cycle_alloc[i][offs++] = cycle_alloc[j][1];
+ }
+
+ cycle_start += dev_desc->cycle_limit[i];
+ }
+
+ return 0;
+}
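
To make the default allocation above concrete, the following standalone sketch (not part of the patch) runs the same slot-assignment loop for a hypothetical 2 CTx by 4 CRx layout using CRx pins 0, 1, 4 and 5; the driver would then pack each cycle as the byte 0x05 followed by its slot 0 and slot 1 channels:

#include <stdio.h>

#define NUM_CYCLES	8
#define TOTAL_TX	2
#define TOTAL_RX	4

int main(void)
{
	int rx_tx_map[TOTAL_RX] = { 0, 1, 4, 5 };
	int alloc[NUM_CYCLES][2];
	int i, j, k, cycle_start = 0;

	for (i = 0; i < NUM_CYCLES; i++)
		alloc[i][0] = alloc[i][1] = 255;

	for (i = 0; i < TOTAL_TX; i++) {
		int cycle_stop = 0;

		for (j = 0; j < TOTAL_RX; j++) {
			int slot = rx_tx_map[j] < 4 ? 0 : 1;	/* CRx0-3 -> slot 0 */
			int chan = i * TOTAL_RX + j;

			for (k = cycle_start; k < NUM_CYCLES; k++) {
				if (alloc[k][slot] != 255)
					continue;
				alloc[k][slot] = chan;
				if (k > cycle_stop)
					cycle_stop = k;
				break;
			}
		}

		/* no cycle may span more than one CTx row */
		cycle_start = cycle_stop + 1;
	}

	/* prints cycles 0..3 as (0,2) (1,3) (4,6) (5,7); the rest stay unused */
	for (i = 0; i < NUM_CYCLES; i++)
		printf("cycle %d: slots %3d %3d\n", i, alloc[i][0], alloc[i][1]);

	return 0;
}
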
+
+static int iqs7211_parse_tp(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *tp_node)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ unsigned int pins[IQS7211_MAX_CTX];
+ int error, count, i, j;
+
+ count = fwnode_property_count_u32(tp_node, "azoteq,rx-enable");
+ if (count == -EINVAL) {
+ return 0;
+ } else if (count < 0) {
+ dev_err(&client->dev, "Failed to count CRx pins: %d\n", count);
+ return count;
+ } else if (count > IQS7211_NUM_CRX) {
+ dev_err(&client->dev, "Invalid number of CRx pins\n");
+ return -EINVAL;
+ }
+
+ error = fwnode_property_read_u32_array(tp_node, "azoteq,rx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read CRx pins: %d\n", error);
+ return error;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (pins[i] >= IQS7211_NUM_CRX) {
+ dev_err(&client->dev, "Invalid CRx pin: %u\n", pins[i]);
+ return -EINVAL;
+ }
+
+ iqs7211->rx_tx_map[i] = pins[i];
+ }
+
+ iqs7211->tp_config.total_rx = count;
+
+ count = fwnode_property_count_u32(tp_node, "azoteq,tx-enable");
+ if (count < 0) {
+ dev_err(&client->dev, "Failed to count CTx pins: %d\n", count);
+ return count;
+ } else if (count > dev_desc->num_ctx) {
+ dev_err(&client->dev, "Invalid number of CTx pins\n");
+ return -EINVAL;
+ }
+
+ error = fwnode_property_read_u32_array(tp_node, "azoteq,tx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read CTx pins: %d\n", error);
+ return error;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (pins[i] >= dev_desc->num_ctx) {
+ dev_err(&client->dev, "Invalid CTx pin: %u\n", pins[i]);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < iqs7211->tp_config.total_rx; j++) {
+ if (iqs7211->rx_tx_map[j] != pins[i])
+ continue;
+
+ dev_err(&client->dev, "Conflicting CTx pin: %u\n",
+ pins[i]);
+ return -EINVAL;
+ }
+
+ iqs7211->rx_tx_map[iqs7211->tp_config.total_rx + i] = pins[i];
+ }
+
+ iqs7211->tp_config.total_tx = count;
+
+ return iqs7211_parse_cycles(iqs7211, tp_node);
+}
+
+static int iqs7211_parse_alp(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *alp_node)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ struct iqs7211_reg_field_desc reg_field;
+ int error, count, i;
+
+ count = fwnode_property_count_u32(alp_node, "azoteq,rx-enable");
+ if (count < 0 && count != -EINVAL) {
+ dev_err(&client->dev, "Failed to count CRx pins: %d\n", count);
+ return count;
+ } else if (count > IQS7211_NUM_CRX) {
+ dev_err(&client->dev, "Invalid number of CRx pins\n");
+ return -EINVAL;
+ } else if (count >= 0) {
+ unsigned int pins[IQS7211_NUM_CRX];
+
+ error = fwnode_property_read_u32_array(alp_node,
+ "azoteq,rx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read CRx pins: %d\n",
+ error);
+ return error;
+ }
+
+ reg_field.addr = dev_desc->alp_config;
+ reg_field.mask = GENMASK(IQS7211_NUM_CRX - 1, 0);
+ reg_field.val = 0;
+
+ for (i = 0; i < count; i++) {
+ if (pins[i] < dev_desc->min_crx_alp ||
+ pins[i] >= IQS7211_NUM_CRX) {
+ dev_err(&client->dev, "Invalid CRx pin: %u\n",
+ pins[i]);
+ return -EINVAL;
+ }
+
+ reg_field.val |= BIT(pins[i]);
+ }
+
+ error = iqs7211_add_field(iqs7211, reg_field);
+ if (error)
+ return error;
+ }
+
+ count = fwnode_property_count_u32(alp_node, "azoteq,tx-enable");
+ if (count < 0 && count != -EINVAL) {
+ dev_err(&client->dev, "Failed to count CTx pins: %d\n", count);
+ return count;
+ } else if (count > dev_desc->num_ctx) {
+ dev_err(&client->dev, "Invalid number of CTx pins\n");
+ return -EINVAL;
+ } else if (count >= 0) {
+ unsigned int pins[IQS7211_MAX_CTX];
+
+ error = fwnode_property_read_u32_array(alp_node,
+ "azoteq,tx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read CTx pins: %d\n",
+ error);
+ return error;
+ }
+
+ reg_field.addr = dev_desc->alp_config + 1;
+ reg_field.mask = GENMASK(dev_desc->num_ctx - 1, 0);
+ reg_field.val = 0;
+
+ for (i = 0; i < count; i++) {
+ if (pins[i] >= dev_desc->num_ctx) {
+ dev_err(&client->dev, "Invalid CTx pin: %u\n",
+ pins[i]);
+ return -EINVAL;
+ }
+
+ reg_field.val |= BIT(pins[i]);
+ }
+
+ error = iqs7211_add_field(iqs7211, reg_field);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static int (*iqs7211_parse_extra[IQS7211_NUM_REG_GRPS])
+ (struct iqs7211_private *iqs7211,
+ struct fwnode_handle *reg_grp_node) = {
+ [IQS7211_REG_GRP_TP] = iqs7211_parse_tp,
+ [IQS7211_REG_GRP_ALP] = iqs7211_parse_alp,
+};
+
+static int iqs7211_parse_reg_grp(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *reg_grp_node,
+ enum iqs7211_reg_grp_id reg_grp)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct iqs7211_reg_field_desc reg_field;
+ int error, i;
+
+ error = iqs7211_parse_props(iqs7211, reg_grp_node, reg_grp,
+ IQS7211_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ if (iqs7211_parse_extra[reg_grp]) {
+ error = iqs7211_parse_extra[reg_grp](iqs7211, reg_grp_node);
+ if (error)
+ return error;
+ }
+
+ iqs7211->ati_start |= dev_desc->ati_start[reg_grp];
+
+ reg_field.addr = dev_desc->kp_enable[reg_grp];
+ reg_field.mask = 0;
+ reg_field.val = 0;
+
+ for (i = 0; i < dev_desc->num_kp_events; i++) {
+ const char *event_name = dev_desc->kp_events[i].name;
+ struct fwnode_handle *event_node;
+
+ if (dev_desc->kp_events[i].reg_grp != reg_grp)
+ continue;
+
+ reg_field.mask |= dev_desc->kp_events[i].enable;
+
+ if (event_name)
+ event_node = fwnode_get_named_child_node(reg_grp_node,
+ event_name);
+ else
+ event_node = fwnode_handle_get(reg_grp_node);
+
+ if (!event_node)
+ continue;
+
+ error = iqs7211_parse_event(iqs7211, event_node,
+ dev_desc->kp_events[i].reg_grp,
+ dev_desc->kp_events[i].reg_key,
+ &iqs7211->kp_code[i]);
+ fwnode_handle_put(event_node);
+ if (error)
+ return error;
+
+ reg_field.val |= dev_desc->kp_events[i].enable;
+
+ iqs7211->event_mask |= iqs7211_reg_grp_masks[reg_grp];
+ }
+
+ return iqs7211_add_field(iqs7211, reg_field);
+}
+
+static int iqs7211_register_kp(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct input_dev *kp_idev = iqs7211->kp_idev;
+ struct i2c_client *client = iqs7211->client;
+ int error, i;
+
+ for (i = 0; i < dev_desc->num_kp_events; i++)
+ if (iqs7211->kp_code[i])
+ break;
+
+ if (i == dev_desc->num_kp_events)
+ return 0;
+
+ kp_idev = devm_input_allocate_device(&client->dev);
+ if (!kp_idev)
+ return -ENOMEM;
+
+ iqs7211->kp_idev = kp_idev;
+
+ kp_idev->name = dev_desc->kp_name;
+ kp_idev->id.bustype = BUS_I2C;
+
+ for (i = 0; i < dev_desc->num_kp_events; i++)
+ if (iqs7211->kp_code[i])
+ input_set_capability(iqs7211->kp_idev, EV_KEY,
+ iqs7211->kp_code[i]);
+
+ error = input_register_device(kp_idev);
+ if (error)
+ dev_err(&client->dev, "Failed to register %s: %d\n",
+ kp_idev->name, error);
+
+ return error;
+}
+
+static int iqs7211_register_tp(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct touchscreen_properties *prop = &iqs7211->prop;
+ struct input_dev *tp_idev = iqs7211->tp_idev;
+ struct i2c_client *client = iqs7211->client;
+ int error;
+
+ error = device_property_read_u32(&client->dev, "azoteq,num-contacts",
+ &iqs7211->num_contacts);
+ if (error == -EINVAL) {
+ return 0;
+ } else if (error) {
+ dev_err(&client->dev, "Failed to read number of contacts: %d\n",
+ error);
+ return error;
+ } else if (iqs7211->num_contacts > IQS7211_MAX_CONTACTS) {
+ dev_err(&client->dev, "Invalid number of contacts: %u\n",
+ iqs7211->num_contacts);
+ return -EINVAL;
+ }
+
+ iqs7211->tp_config.num_contacts = iqs7211->num_contacts ? : 1;
+
+ if (!iqs7211->num_contacts)
+ return 0;
+
+ iqs7211->event_mask |= IQS7211_EVENT_MASK_MOVE;
+
+ tp_idev = devm_input_allocate_device(&client->dev);
+ if (!tp_idev)
+ return -ENOMEM;
+
+ iqs7211->tp_idev = tp_idev;
+
+ tp_idev->name = dev_desc->tp_name;
+ tp_idev->id.bustype = BUS_I2C;
+
+ input_set_abs_params(tp_idev, ABS_MT_POSITION_X,
+ 0, le16_to_cpu(iqs7211->tp_config.max_x), 0, 0);
+
+ input_set_abs_params(tp_idev, ABS_MT_POSITION_Y,
+ 0, le16_to_cpu(iqs7211->tp_config.max_y), 0, 0);
+
+ input_set_abs_params(tp_idev, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
+
+ touchscreen_parse_properties(tp_idev, true, prop);
+
+ /*
+ * The device reserves 0xFFFF for coordinates reported by slots that are
+ * not in a state of touch.
+ */
+ if (prop->max_x >= U16_MAX || prop->max_y >= U16_MAX) {
+ dev_err(&client->dev, "Invalid trackpad size: %u*%u\n",
+ prop->max_x, prop->max_y);
+ return -EINVAL;
+ }
+
+ iqs7211->tp_config.max_x = cpu_to_le16(prop->max_x);
+ iqs7211->tp_config.max_y = cpu_to_le16(prop->max_y);
+
+ error = input_mt_init_slots(tp_idev, iqs7211->num_contacts,
+ INPUT_MT_DIRECT);
+ if (error) {
+ dev_err(&client->dev, "Failed to initialize slots: %d\n",
+ error);
+ return error;
+ }
+
+ error = input_register_device(tp_idev);
+ if (error)
+ dev_err(&client->dev, "Failed to register %s: %d\n",
+ tp_idev->name, error);
+
+ return error;
+}
+
+static int iqs7211_report(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ struct iqs7211_touch_data *touch_data;
+ u16 info_flags, charge_mode, gesture_flags;
+ __le16 status[12];
+ int error, i;
+
+ error = iqs7211_read_burst(iqs7211, dev_desc->sys_stat, status,
+ dev_desc->contact_offs * sizeof(__le16) +
+ iqs7211->num_contacts * sizeof(*touch_data));
+ if (error)
+ return error;
+
+ info_flags = le16_to_cpu(status[dev_desc->info_offs]);
+
+ if (info_flags & dev_desc->show_reset) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+
+ /*
+ * The device may or may not expect forced communication after
+ * it exits hardware reset, so the corresponding state machine
+ * must be reset as well.
+ */
+ iqs7211->comms_mode = iqs7211->comms_init;
+
+ return iqs7211_init_device(iqs7211);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev_desc->ati_error); i++) {
+ if (!(info_flags & dev_desc->ati_error[i]))
+ continue;
+
+ dev_err(&client->dev, "Unexpected %s ATI error\n",
+ iqs7211_reg_grp_names[i]);
+ return 0;
+ }
+
+ for (i = 0; i < iqs7211->num_contacts; i++) {
+ u16 pressure;
+
+ touch_data = (struct iqs7211_touch_data *)
+ &status[dev_desc->contact_offs] + i;
+ pressure = le16_to_cpu(touch_data->pressure);
+
+ input_mt_slot(iqs7211->tp_idev, i);
+ if (input_mt_report_slot_state(iqs7211->tp_idev, MT_TOOL_FINGER,
+ pressure != 0)) {
+ touchscreen_report_pos(iqs7211->tp_idev, &iqs7211->prop,
+ le16_to_cpu(touch_data->abs_x),
+ le16_to_cpu(touch_data->abs_y),
+ true);
+ input_report_abs(iqs7211->tp_idev, ABS_MT_PRESSURE,
+ pressure);
+ }
+ }
+
+ if (iqs7211->num_contacts) {
+ input_mt_sync_frame(iqs7211->tp_idev);
+ input_sync(iqs7211->tp_idev);
+ }
+
+ if (!iqs7211->kp_idev)
+ return 0;
+
+ charge_mode = info_flags & GENMASK(dev_desc->charge_shift + 2,
+ dev_desc->charge_shift);
+ charge_mode >>= dev_desc->charge_shift;
+
+ /*
+ * A charging mode higher than 2 (idle mode) indicates the device last
+ * operated in low-power mode and intends to express an ALP event.
+ */
+ if (info_flags & dev_desc->kp_events->mask && charge_mode > 2) {
+ input_report_key(iqs7211->kp_idev, *iqs7211->kp_code, 1);
+ input_sync(iqs7211->kp_idev);
+
+ input_report_key(iqs7211->kp_idev, *iqs7211->kp_code, 0);
+ }
+
+ for (i = 0; i < dev_desc->num_kp_events; i++) {
+ if (dev_desc->kp_events[i].reg_grp != IQS7211_REG_GRP_BTN)
+ continue;
+
+ input_report_key(iqs7211->kp_idev, iqs7211->kp_code[i],
+ info_flags & dev_desc->kp_events[i].mask);
+ }
+
+ gesture_flags = le16_to_cpu(status[dev_desc->gesture_offs]);
+
+ for (i = 0; i < dev_desc->num_kp_events; i++) {
+ enum iqs7211_reg_key_id reg_key = dev_desc->kp_events[i].reg_key;
+ u16 mask = dev_desc->kp_events[i].mask;
+
+ if (dev_desc->kp_events[i].reg_grp != IQS7211_REG_GRP_TP)
+ continue;
+
+ if ((gesture_flags ^ iqs7211->gesture_cache) & mask)
+ input_report_key(iqs7211->kp_idev, iqs7211->kp_code[i],
+ gesture_flags & mask);
+
+ iqs7211->gesture_cache &= ~mask;
+
+ /*
+ * Hold and palm gestures persist while the contact remains in
+ * place; all others are momentary and hence are followed by a
+ * complementary release event.
+ */
+ if (reg_key == IQS7211_REG_KEY_HOLD ||
+ reg_key == IQS7211_REG_KEY_PALM) {
+ iqs7211->gesture_cache |= gesture_flags & mask;
+ gesture_flags &= ~mask;
+ }
+ }
+
+ if (gesture_flags) {
+ input_sync(iqs7211->kp_idev);
+
+ for (i = 0; i < dev_desc->num_kp_events; i++)
+ if (dev_desc->kp_events[i].reg_grp == IQS7211_REG_GRP_TP &&
+ gesture_flags & dev_desc->kp_events[i].mask)
+ input_report_key(iqs7211->kp_idev,
+ iqs7211->kp_code[i], 0);
+ }
+
+ input_sync(iqs7211->kp_idev);
+
+ return 0;
+}
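
The charging-mode test above is plain field extraction. A standalone sketch (not part of the patch; the shift position and flag value are made up) of how a mode greater than 2 selects the ALP key press/release path:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int charge_shift = 13;				/* assumed position of the 3-bit field */
	uint16_t info_flags = 4u << charge_shift;	/* device reports mode 4 */
	unsigned int mode = (info_flags >> charge_shift) & 0x7;

	printf("charging mode %u -> %s\n", mode,
	       mode > 2 ? "was in low-power, report ALP press/release"
			: "active/idle, no ALP event");
	return 0;
}
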
+
+static irqreturn_t iqs7211_irq(int irq, void *context)
+{
+ struct iqs7211_private *iqs7211 = context;
+
+ return iqs7211_report(iqs7211) ? IRQ_NONE : IRQ_HANDLED;
+}
+
+static int iqs7211_suspend(struct device *dev)
+{
+ struct iqs7211_private *iqs7211 = dev_get_drvdata(dev);
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ int error;
+
+ if (!dev_desc->suspend || device_may_wakeup(dev))
+ return 0;
+
+ /*
+ * I2C communication prompts the device to assert its RDY pin if it is
+ * not already asserted. As such, the interrupt must be disabled so as
+ * to prevent reentrant interrupts.
+ */
+ disable_irq(gpiod_to_irq(iqs7211->irq_gpio));
+
+ error = iqs7211_write_word(iqs7211, dev_desc->sys_ctrl,
+ dev_desc->suspend);
+
+ enable_irq(gpiod_to_irq(iqs7211->irq_gpio));
+
+ return error;
+}
+
+static int iqs7211_resume(struct device *dev)
+{
+ struct iqs7211_private *iqs7211 = dev_get_drvdata(dev);
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ __le16 sys_ctrl[] = {
+ 0,
+ cpu_to_le16(iqs7211->event_mask),
+ };
+ int error;
+
+ if (!dev_desc->suspend || device_may_wakeup(dev))
+ return 0;
+
+ disable_irq(gpiod_to_irq(iqs7211->irq_gpio));
+
+ /*
+ * Forced communication, if in use, must be explicitly enabled as part
+ * of the wake-up command.
+ */
+ error = iqs7211_write_burst(iqs7211, dev_desc->sys_ctrl, sys_ctrl,
+ sizeof(sys_ctrl));
+
+ enable_irq(gpiod_to_irq(iqs7211->irq_gpio));
+
+ return error;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(iqs7211_pm, iqs7211_suspend, iqs7211_resume);
+
+static ssize_t fw_info_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs7211_private *iqs7211 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u.%u.%u.%u:%u.%u\n",
+ le16_to_cpu(iqs7211->ver_info.prod_num),
+ le32_to_cpu(iqs7211->ver_info.patch),
+ le16_to_cpu(iqs7211->ver_info.major),
+ le16_to_cpu(iqs7211->ver_info.minor),
+ iqs7211->exp_file[1], iqs7211->exp_file[0]);
+}
+
+static DEVICE_ATTR_RO(fw_info);
+
+static struct attribute *iqs7211_attrs[] = {
+ &dev_attr_fw_info.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(iqs7211);
+
+static const struct of_device_id iqs7211_of_match[] = {
+ {
+ .compatible = "azoteq,iqs7210a",
+ .data = &iqs7211_devs[IQS7210A],
+ },
+ {
+ .compatible = "azoteq,iqs7211a",
+ .data = &iqs7211_devs[IQS7211A],
+ },
+ {
+ .compatible = "azoteq,iqs7211e",
+ .data = &iqs7211_devs[IQS7211E],
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs7211_of_match);
+
+static int iqs7211_probe(struct i2c_client *client)
+{
+ struct iqs7211_private *iqs7211;
+ enum iqs7211_reg_grp_id reg_grp;
+ unsigned long irq_flags;
+ bool shared_irq;
+ int error, irq;
+
+ iqs7211 = devm_kzalloc(&client->dev, sizeof(*iqs7211), GFP_KERNEL);
+ if (!iqs7211)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iqs7211);
+ iqs7211->client = client;
+
+ INIT_LIST_HEAD(&iqs7211->reg_field_head);
+
+ iqs7211->dev_desc = device_get_match_data(&client->dev);
+ if (!iqs7211->dev_desc)
+ return -ENODEV;
+
+ shared_irq = iqs7211->dev_desc->num_ctx == IQS7211_MAX_CTX;
+
+ /*
+ * The RDY pin behaves as an interrupt, but must also be polled ahead
+ * of unsolicited I2C communication. As such, it is first opened as a
+ * GPIO and then passed to gpiod_to_irq() to register the interrupt.
+ *
+ * If an extra CTx pin is present, the RDY and MCLR pins are combined
+ * into a single bidirectional pin. In that case, the platform's GPIO
+ * must be configured as an open-drain output.
+ */
+ iqs7211->irq_gpio = devm_gpiod_get(&client->dev, "irq",
+ shared_irq ? GPIOD_OUT_LOW
+ : GPIOD_IN);
+ if (IS_ERR(iqs7211->irq_gpio)) {
+ error = PTR_ERR(iqs7211->irq_gpio);
+ dev_err(&client->dev, "Failed to request IRQ GPIO: %d\n",
+ error);
+ return error;
+ }
+
+ if (shared_irq) {
+ iqs7211->reset_gpio = iqs7211->irq_gpio;
+ } else {
+ iqs7211->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(iqs7211->reset_gpio)) {
+ error = PTR_ERR(iqs7211->reset_gpio);
+ dev_err(&client->dev,
+ "Failed to request reset GPIO: %d\n", error);
+ return error;
+ }
+ }
+
+ error = iqs7211_start_comms(iqs7211);
+ if (error)
+ return error;
+
+ for (reg_grp = 0; reg_grp < IQS7211_NUM_REG_GRPS; reg_grp++) {
+ const char *reg_grp_name = iqs7211_reg_grp_names[reg_grp];
+ struct fwnode_handle *reg_grp_node;
+
+ if (reg_grp_name)
+ reg_grp_node = device_get_named_child_node(&client->dev,
+ reg_grp_name);
+ else
+ reg_grp_node = fwnode_handle_get(dev_fwnode(&client->dev));
+
+ if (!reg_grp_node)
+ continue;
+
+ error = iqs7211_parse_reg_grp(iqs7211, reg_grp_node, reg_grp);
+ fwnode_handle_put(reg_grp_node);
+ if (error)
+ return error;
+ }
+
+ error = iqs7211_register_kp(iqs7211);
+ if (error)
+ return error;
+
+ error = iqs7211_register_tp(iqs7211);
+ if (error)
+ return error;
+
+ error = iqs7211_init_device(iqs7211);
+ if (error)
+ return error;
+
+ irq = gpiod_to_irq(iqs7211->irq_gpio);
+ if (irq < 0)
+ return irq;
+
+ irq_flags = gpiod_is_active_low(iqs7211->irq_gpio) ? IRQF_TRIGGER_LOW
+ : IRQF_TRIGGER_HIGH;
+ irq_flags |= IRQF_ONESHOT;
+
+ error = devm_request_threaded_irq(&client->dev, irq, NULL, iqs7211_irq,
+ irq_flags, client->name, iqs7211);
+ if (error)
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+
+ return error;
+}
+
+static struct i2c_driver iqs7211_i2c_driver = {
+ .probe = iqs7211_probe,
+ .driver = {
+ .name = "iqs7211",
+ .of_match_table = iqs7211_of_match,
+ .dev_groups = iqs7211_groups,
+ .pm = pm_sleep_ptr(&iqs7211_pm),
+ },
+};
+module_i2c_driver(iqs7211_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS7210A/7211A/E Trackpad/Touchscreen Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 15b5cb763526..9bad8b93c039 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -198,54 +198,36 @@ static void lpc32xx_ts_close(struct input_dev *dev)
static int lpc32xx_ts_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct lpc32xx_tsc *tsc;
struct input_dev *input;
- struct resource *res;
- resource_size_t size;
int irq;
int error;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Can't get memory resource\n");
- return -ENOENT;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- tsc = kzalloc(sizeof(*tsc), GFP_KERNEL);
- input = input_allocate_device();
- if (!tsc || !input) {
- dev_err(&pdev->dev, "failed allocating memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ tsc = devm_kzalloc(dev, sizeof(*tsc), GFP_KERNEL);
+ if (!tsc)
+ return -ENOMEM;
- tsc->dev = input;
tsc->irq = irq;
- size = resource_size(res);
-
- if (!request_mem_region(res->start, size, pdev->name)) {
- dev_err(&pdev->dev, "TSC registers are not free\n");
- error = -EBUSY;
- goto err_free_mem;
- }
+ tsc->tsc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tsc->tsc_base))
+ return PTR_ERR(tsc->tsc_base);
- tsc->tsc_base = ioremap(res->start, size);
- if (!tsc->tsc_base) {
- dev_err(&pdev->dev, "Can't map memory\n");
- error = -ENOMEM;
- goto err_release_mem;
- }
-
- tsc->clk = clk_get(&pdev->dev, NULL);
+ tsc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(tsc->clk)) {
dev_err(&pdev->dev, "failed getting clock\n");
- error = PTR_ERR(tsc->clk);
- goto err_unmap;
+ return PTR_ERR(tsc->clk);
+ }
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(&pdev->dev, "failed allocating input device\n");
+ return -ENOMEM;
}
input->name = MOD_NAME;
@@ -254,68 +236,33 @@ static int lpc32xx_ts_probe(struct platform_device *pdev)
input->id.vendor = 0x0001;
input->id.product = 0x0002;
input->id.version = 0x0100;
- input->dev.parent = &pdev->dev;
input->open = lpc32xx_ts_open;
input->close = lpc32xx_ts_close;
- input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
input_set_abs_params(input, ABS_X, LPC32XX_TSC_MIN_XY_VAL,
LPC32XX_TSC_MAX_XY_VAL, 0, 0);
input_set_abs_params(input, ABS_Y, LPC32XX_TSC_MIN_XY_VAL,
LPC32XX_TSC_MAX_XY_VAL, 0, 0);
input_set_drvdata(input, tsc);
+ tsc->dev = input;
- error = request_irq(tsc->irq, lpc32xx_ts_interrupt,
- 0, pdev->name, tsc);
+ error = devm_request_irq(dev, tsc->irq, lpc32xx_ts_interrupt,
+ 0, pdev->name, tsc);
if (error) {
dev_err(&pdev->dev, "failed requesting interrupt\n");
- goto err_put_clock;
+ return error;
}
error = input_register_device(input);
if (error) {
dev_err(&pdev->dev, "failed registering input device\n");
- goto err_free_irq;
+ return error;
}
platform_set_drvdata(pdev, tsc);
- device_init_wakeup(&pdev->dev, 1);
-
- return 0;
-
-err_free_irq:
- free_irq(tsc->irq, tsc);
-err_put_clock:
- clk_put(tsc->clk);
-err_unmap:
- iounmap(tsc->tsc_base);
-err_release_mem:
- release_mem_region(res->start, size);
-err_free_mem:
- input_free_device(input);
- kfree(tsc);
-
- return error;
-}
-
-static int lpc32xx_ts_remove(struct platform_device *pdev)
-{
- struct lpc32xx_tsc *tsc = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(tsc->irq, tsc);
-
- input_unregister_device(tsc->dev);
-
- clk_put(tsc->clk);
-
- iounmap(tsc->tsc_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- kfree(tsc);
+ device_init_wakeup(&pdev->dev, true);
return 0;
}
@@ -384,7 +331,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_tsc_of_match);
static struct platform_driver lpc32xx_ts_driver = {
.probe = lpc32xx_ts_probe,
- .remove = lpc32xx_ts_remove,
.driver = {
.name = MOD_NAME,
.pm = LPC32XX_TS_PM_OPS,
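
The conversion above is the usual devm pattern: once every resource is device-managed, the error-unwind labels and the .remove callback disappear. A minimal sketch of the same shape for a hypothetical platform driver follows; kernel build context is assumed and the "foo" names are illustrative, not from this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	void __iomem *base;
	struct clk *clk;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_priv *priv;
	int irq;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk))
		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "failed to get clock\n");

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* no unwind labels and no .remove: everything above is released automatically */
	return devm_request_irq(dev, irq, foo_irq, 0, dev_name(dev), priv);
}
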
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 32896e5085bd..2ac4483fbc25 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1451,13 +1451,8 @@ static int mip4_probe(struct i2c_client *client)
ts->gpio_ce = devm_gpiod_get_optional(&client->dev,
"ce", GPIOD_OUT_LOW);
- if (IS_ERR(ts->gpio_ce)) {
- error = PTR_ERR(ts->gpio_ce);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->gpio_ce))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->gpio_ce), "Failed to get gpio\n");
error = mip4_power_on(ts);
if (error)
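
The hunk above, and several below, replace the open-coded "print an error unless it is -EPROBE_DEFER, then return" sequence with dev_err_probe(). A minimal sketch of the resulting pattern; kernel context is assumed and the "bar" helper is hypothetical:

#include <linux/gpio/consumer.h>
#include <linux/i2c.h>

/*
 * Returns 0 or a negative errno; dev_err_probe() prints at error level only
 * when the failure is not a probe deferral and records the deferral reason
 * otherwise.
 */
static int bar_get_reset_gpio(struct i2c_client *client, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		return dev_err_probe(&client->dev, PTR_ERR(gpiod),
				     "Failed to get reset GPIO\n");

	*out = gpiod;
	return 0;
}
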
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index ac12494c7930..af233b6a16d9 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/i2c.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
@@ -43,6 +42,7 @@
/* Touchscreen absolute values */
#define MMS114_MAX_AREA 0xff
+#define MMS114_MAX_TOUCHKEYS 15
#define MMS114_MAX_TOUCH 10
#define MMS114_EVENT_SIZE 8
#define MMS136_EVENT_SIZE 6
@@ -70,6 +70,9 @@ struct mms114_data {
unsigned int contact_threshold;
unsigned int moving_threshold;
+ u32 keycodes[MMS114_MAX_TOUCHKEYS];
+ int num_keycodes;
+
/* Use cache data for mode control register(write only) */
u8 cache_mode_control;
};
@@ -167,11 +170,6 @@ static void mms114_process_mt(struct mms114_data *data, struct mms114_touch *tou
return;
}
- if (touch->type != MMS114_TYPE_TOUCHSCREEN) {
- dev_err(&client->dev, "Wrong touch type (%d)\n", touch->type);
- return;
- }
-
id = touch->id - 1;
x = touch->x_lo | touch->x_hi << 8;
y = touch->y_lo | touch->y_hi << 8;
@@ -191,9 +189,33 @@ static void mms114_process_mt(struct mms114_data *data, struct mms114_touch *tou
}
}
+static void mms114_process_touchkey(struct mms114_data *data,
+ struct mms114_touch *touch)
+{
+ struct i2c_client *client = data->client;
+ struct input_dev *input_dev = data->input_dev;
+ unsigned int keycode_id;
+
+ if (touch->id == 0)
+ return;
+
+ if (touch->id > data->num_keycodes) {
+ dev_err(&client->dev, "Wrong touch id for touchkey (%d)\n",
+ touch->id);
+ return;
+ }
+
+ keycode_id = touch->id - 1;
+ dev_dbg(&client->dev, "keycode id: %d, pressed: %d\n", keycode_id,
+ touch->pressed);
+
+ input_report_key(input_dev, data->keycodes[keycode_id], touch->pressed);
+}
+
static irqreturn_t mms114_interrupt(int irq, void *dev_id)
{
struct mms114_data *data = dev_id;
+ struct i2c_client *client = data->client;
struct input_dev *input_dev = data->input_dev;
struct mms114_touch touch[MMS114_MAX_TOUCH];
int packet_size;
@@ -223,8 +245,22 @@ static irqreturn_t mms114_interrupt(int irq, void *dev_id)
if (error < 0)
goto out;
- for (index = 0; index < touch_size; index++)
- mms114_process_mt(data, touch + index);
+ for (index = 0; index < touch_size; index++) {
+ switch (touch[index].type) {
+ case MMS114_TYPE_TOUCHSCREEN:
+ mms114_process_mt(data, touch + index);
+ break;
+
+ case MMS114_TYPE_TOUCHKEY:
+ mms114_process_touchkey(data, touch + index);
+ break;
+
+ default:
+ dev_err(&client->dev, "Wrong touch type (%d)\n",
+ touch[index].type);
+ break;
+ }
+ }
input_mt_report_pointer_emulation(data->input_dev, true);
input_sync(data->input_dev);
@@ -446,6 +482,7 @@ static int mms114_probe(struct i2c_client *client)
struct input_dev *input_dev;
const void *match_data;
int error;
+ int i;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "Not supported I2C adapter\n");
@@ -469,6 +506,42 @@ static int mms114_probe(struct i2c_client *client)
data->type = (enum mms_type)match_data;
+ data->num_keycodes = device_property_count_u32(&client->dev,
+ "linux,keycodes");
+ if (data->num_keycodes == -EINVAL) {
+ data->num_keycodes = 0;
+ } else if (data->num_keycodes < 0) {
+ dev_err(&client->dev,
+ "Unable to parse linux,keycodes property: %d\n",
+ data->num_keycodes);
+ return data->num_keycodes;
+ } else if (data->num_keycodes > MMS114_MAX_TOUCHKEYS) {
+ dev_warn(&client->dev,
+ "Found %d linux,keycodes but max is %d, ignoring the rest\n",
+ data->num_keycodes, MMS114_MAX_TOUCHKEYS);
+ data->num_keycodes = MMS114_MAX_TOUCHKEYS;
+ }
+
+ if (data->num_keycodes > 0) {
+ error = device_property_read_u32_array(&client->dev,
+ "linux,keycodes",
+ data->keycodes,
+ data->num_keycodes);
+ if (error) {
+ dev_err(&client->dev,
+ "Unable to read linux,keycodes values: %d\n",
+ error);
+ return error;
+ }
+
+ input_dev->keycode = data->keycodes;
+ input_dev->keycodemax = data->num_keycodes;
+ input_dev->keycodesize = sizeof(data->keycodes[0]);
+ for (i = 0; i < data->num_keycodes; i++)
+ input_set_capability(input_dev,
+ EV_KEY, data->keycodes[i]);
+ }
+
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
diff --git a/drivers/input/touchscreen/novatek-nvt-ts.c b/drivers/input/touchscreen/novatek-nvt-ts.c
index 7f7d879aac6d..1a797e410a3f 100644
--- a/drivers/input/touchscreen/novatek-nvt-ts.c
+++ b/drivers/input/touchscreen/novatek-nvt-ts.c
@@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Driver for Novatek i2c touchscreen controller as found on
- * the Acer Iconia One 7 B1-750 tablet. The Touchscreen controller
- * model-number is unknown. Android calls this a "NVT-ts" touchscreen,
- * but that may apply to other Novatek controller models too.
+ * Driver for Novatek NT11205 i2c touchscreen controller as found
+ * on the Acer Iconia One 7 B1-750 tablet.
*
* Copyright (c) 2023 Hans de Goede <hdegoede@redhat.com>
*/
@@ -272,7 +270,7 @@ static int nvt_ts_probe(struct i2c_client *client)
error = input_register_device(input);
if (error) {
- dev_err(dev, "failed to request irq: %d\n", error);
+ dev_err(dev, "failed to register input device: %d\n", error);
return error;
}
@@ -296,6 +294,6 @@ static struct i2c_driver nvt_ts_driver = {
module_i2c_driver(nvt_ts_driver);
-MODULE_DESCRIPTION("Novatek NVT-ts touchscreen driver");
+MODULE_DESCRIPTION("Novatek NT11205 touchscreen driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 554e179c2e48..4ede0687beb0 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -13,8 +13,8 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/slab.h>
#define PIXCIR_MAX_SLOTS 5 /* Max fingers supported by driver */
@@ -515,41 +515,27 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client)
input_set_drvdata(input, tsdata);
tsdata->gpio_attb = devm_gpiod_get(dev, "attb", GPIOD_IN);
- if (IS_ERR(tsdata->gpio_attb)) {
- error = PTR_ERR(tsdata->gpio_attb);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to request ATTB gpio: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(tsdata->gpio_attb))
+ return dev_err_probe(dev, PTR_ERR(tsdata->gpio_attb),
+ "Failed to request ATTB gpio\n");
tsdata->gpio_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
- if (IS_ERR(tsdata->gpio_reset)) {
- error = PTR_ERR(tsdata->gpio_reset);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to request RESET gpio: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(tsdata->gpio_reset))
+ return dev_err_probe(dev, PTR_ERR(tsdata->gpio_reset),
+ "Failed to request RESET gpio\n");
tsdata->gpio_wake = devm_gpiod_get_optional(dev, "wake",
GPIOD_OUT_HIGH);
- if (IS_ERR(tsdata->gpio_wake)) {
- error = PTR_ERR(tsdata->gpio_wake);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get wake gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(tsdata->gpio_wake))
+ return dev_err_probe(dev, PTR_ERR(tsdata->gpio_wake),
+ "Failed to get wake gpio\n");
tsdata->gpio_enable = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_HIGH);
- if (IS_ERR(tsdata->gpio_enable)) {
- error = PTR_ERR(tsdata->gpio_enable);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get enable gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(tsdata->gpio_enable))
+ return dev_err_probe(dev, PTR_ERR(tsdata->gpio_enable),
+ "Failed to get enable gpio\n");
if (tsdata->gpio_enable)
msleep(100);
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 76e7d62d5870..78dd3059d585 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -1087,32 +1087,20 @@ static int raydium_i2c_probe(struct i2c_client *client)
i2c_set_clientdata(client, ts);
ts->avdd = devm_regulator_get(&client->dev, "avdd");
- if (IS_ERR(ts->avdd)) {
- error = PTR_ERR(ts->avdd);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get 'avdd' regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->avdd))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->avdd),
+ "Failed to get 'avdd' regulator\n");
ts->vccio = devm_regulator_get(&client->dev, "vccio");
- if (IS_ERR(ts->vccio)) {
- error = PTR_ERR(ts->vccio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get 'vccio' regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->vccio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->vccio),
+ "Failed to get 'vccio' regulator\n");
ts->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_LOW);
- if (IS_ERR(ts->reset_gpio)) {
- error = PTR_ERR(ts->reset_gpio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "failed to get reset gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->reset_gpio),
+ "Failed to get reset gpio\n");
error = raydium_i2c_power_on(ts);
if (error)
diff --git a/drivers/input/touchscreen/resistive-adc-touch.c b/drivers/input/touchscreen/resistive-adc-touch.c
index 6f754a8d30b1..7e761ec73273 100644
--- a/drivers/input/touchscreen/resistive-adc-touch.c
+++ b/drivers/input/touchscreen/resistive-adc-touch.c
@@ -210,12 +210,8 @@ static int grts_probe(struct platform_device *pdev)
/* get the channels from IIO device */
st->iio_chans = devm_iio_channel_get_all(dev);
- if (IS_ERR(st->iio_chans)) {
- error = PTR_ERR(st->iio_chans);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "can't get iio channels.\n");
- return error;
- }
+ if (IS_ERR(st->iio_chans))
+ return dev_err_probe(dev, PTR_ERR(st->iio_chans), "can't get iio channels\n");
if (!device_property_present(dev, "io-channel-names"))
return -ENODEV;
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 9e28f962e059..62f562ad5026 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -706,11 +706,9 @@ static int silead_ts_probe(struct i2c_client *client)
/* Power GPIO pin */
data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
- if (IS_ERR(data->gpio_power)) {
- if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER)
- dev_err(dev, "Shutdown GPIO request failed\n");
- return PTR_ERR(data->gpio_power);
- }
+ if (IS_ERR(data->gpio_power))
+ return dev_err_probe(dev, PTR_ERR(data->gpio_power),
+ "Shutdown GPIO request failed\n");
error = silead_ts_setup(client);
if (error)
diff --git a/drivers/input/touchscreen/sis_i2c.c b/drivers/input/touchscreen/sis_i2c.c
index 426564d0fc39..ed56cb546f39 100644
--- a/drivers/input/touchscreen/sis_i2c.c
+++ b/drivers/input/touchscreen/sis_i2c.c
@@ -310,23 +310,15 @@ static int sis_ts_probe(struct i2c_client *client)
ts->attn_gpio = devm_gpiod_get_optional(&client->dev,
"attn", GPIOD_IN);
- if (IS_ERR(ts->attn_gpio)) {
- error = PTR_ERR(ts->attn_gpio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get attention GPIO: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->attn_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->attn_gpio),
+ "Failed to get attention GPIO\n");
ts->reset_gpio = devm_gpiod_get_optional(&client->dev,
"reset", GPIOD_OUT_LOW);
- if (IS_ERR(ts->reset_gpio)) {
- error = PTR_ERR(ts->reset_gpio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get reset GPIO: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->reset_gpio),
+ "Failed to get reset GPIO\n");
sis_ts_reset(ts);
diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c
index 31d140248f2e..7efbcd0fde4f 100644
--- a/drivers/input/touchscreen/surface3_spi.c
+++ b/drivers/input/touchscreen/surface3_spi.c
@@ -221,7 +221,6 @@ static void surface3_spi_power(struct surface3_ts_data *data, bool on)
*/
static int surface3_spi_get_gpio_config(struct surface3_ts_data *data)
{
- int error;
struct device *dev;
struct gpio_desc *gpiod;
int i;
@@ -231,15 +230,9 @@ static int surface3_spi_get_gpio_config(struct surface3_ts_data *data)
/* Get the reset lines GPIO pin number */
for (i = 0; i < 2; i++) {
gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- error = PTR_ERR(gpiod);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get power GPIO %d: %d\n",
- i,
- error);
- return error;
- }
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod),
+ "Failed to get power GPIO %d\n", i);
data->gpiod_rst[i] = gpiod;
}
diff --git a/drivers/input/touchscreen/sx8654.c b/drivers/input/touchscreen/sx8654.c
index 0293c493bc79..f5c5881cef6b 100644
--- a/drivers/input/touchscreen/sx8654.c
+++ b/drivers/input/touchscreen/sx8654.c
@@ -323,13 +323,9 @@ static int sx8654_probe(struct i2c_client *client)
sx8654->gpio_reset = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_HIGH);
- if (IS_ERR(sx8654->gpio_reset)) {
- error = PTR_ERR(sx8654->gpio_reset);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev, "unable to get reset-gpio: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(sx8654->gpio_reset))
+ return dev_err_probe(&client->dev, PTR_ERR(sx8654->gpio_reset),
+ "unable to get reset-gpio\n");
dev_dbg(&client->dev, "got GPIO reset pin\n");
sx8654->data = device_get_match_data(&client->dev);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index decf2d24a115..9aa4e35fb4f5 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -25,7 +25,6 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/sort.h>
#include <linux/pm_wakeirq.h>
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 0c35018239ce..e2857109e966 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -12,13 +12,14 @@
#include "amd_iommu_types.h"
irqreturn_t amd_iommu_int_thread(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
irqreturn_t amd_iommu_int_handler(int irq, void *data);
void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
-int amd_iommu_init_devices(void);
-void amd_iommu_uninit_devices(void);
-void amd_iommu_init_notifier(void);
+void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index dc1db6167927..7dc30c2b56b3 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -120,10 +120,13 @@
#define PASID_MASK 0x0000ffff
/* MMIO status bits */
-#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK BIT(0)
+#define MMIO_STATUS_EVT_OVERFLOW_MASK BIT(0)
#define MMIO_STATUS_EVT_INT_MASK BIT(1)
#define MMIO_STATUS_COM_WAIT_INT_MASK BIT(2)
+#define MMIO_STATUS_EVT_RUN_MASK BIT(3)
+#define MMIO_STATUS_PPR_OVERFLOW_MASK BIT(5)
#define MMIO_STATUS_PPR_INT_MASK BIT(6)
+#define MMIO_STATUS_PPR_RUN_MASK BIT(7)
#define MMIO_STATUS_GALOG_RUN_MASK BIT(8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK BIT(9)
#define MMIO_STATUS_GALOG_INT_MASK BIT(10)
@@ -381,15 +384,15 @@
*/
#define DTE_FLAG_V BIT_ULL(0)
#define DTE_FLAG_TV BIT_ULL(1)
+#define DTE_FLAG_GIOV BIT_ULL(54)
+#define DTE_FLAG_GV BIT_ULL(55)
+#define DTE_GLX_SHIFT (56)
+#define DTE_GLX_MASK (3)
#define DTE_FLAG_IR BIT_ULL(61)
#define DTE_FLAG_IW BIT_ULL(62)
#define DTE_FLAG_IOTLB BIT_ULL(32)
-#define DTE_FLAG_GIOV BIT_ULL(54)
-#define DTE_FLAG_GV BIT_ULL(55)
#define DTE_FLAG_MASK (0x3ffULL << 32)
-#define DTE_GLX_SHIFT (56)
-#define DTE_GLX_MASK (3)
#define DEV_DOMID_MASK 0xffffULL
#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
@@ -702,12 +705,21 @@ struct amd_iommu {
/* event buffer virtual address */
u8 *evt_buf;
+ /* Name for event log interrupt */
+ unsigned char evt_irq_name[16];
+
/* Base of the PPR log, if present */
u8 *ppr_log;
+ /* Name for PPR log interrupt */
+ unsigned char ppr_irq_name[16];
+
/* Base of the GA log, if present */
u8 *ga_log;
+ /* Name for GA log interrupt */
+ unsigned char ga_irq_name[16];
+
/* Tail of the GA log, if present */
u8 *ga_log_tail;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index ea0f1ab94178..45efb7e5d725 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -483,6 +483,10 @@ static void iommu_disable(struct amd_iommu *iommu)
iommu_feature_disable(iommu, CONTROL_GALOG_EN);
iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+ /* Disable IOMMU PPR logging */
+ iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
+ iommu_feature_disable(iommu, CONTROL_PPRINT_EN);
+
/* Disable IOMMU hardware itself */
iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
@@ -753,37 +757,61 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
}
/*
+ * The interrupt handler has processed all pending events and adjusted the
+ * head and tail pointers. Clear the overflow bit and restart logging.
+ */
+static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+ u8 cntrl_intr, u8 cntrl_log,
+ u32 status_run_mask, u32 status_overflow_mask)
+{
+ u32 status;
+
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (status & status_run_mask)
+ return;
+
+ pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);
+
+ iommu_feature_disable(iommu, cntrl_log);
+ iommu_feature_disable(iommu, cntrl_intr);
+
+ writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ iommu_feature_enable(iommu, cntrl_intr);
+ iommu_feature_enable(iommu, cntrl_log);
+}
+
+/*
* This function restarts event logging in case the IOMMU experienced
* an event log buffer overflow.
*/
void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
{
- iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
- iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+ amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
+ CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK,
+ MMIO_STATUS_EVT_OVERFLOW_MASK);
}
/*
* This function restarts event logging in case the IOMMU experienced
- * an GA log overflow.
+ * a GA log overflow.
*/
void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
{
- u32 status;
-
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- if (status & MMIO_STATUS_GALOG_RUN_MASK)
- return;
-
- pr_info_ratelimited("IOMMU GA Log restarting\n");
-
- iommu_feature_disable(iommu, CONTROL_GALOG_EN);
- iommu_feature_disable(iommu, CONTROL_GAINT_EN);
-
- writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
- iommu->mmio_base + MMIO_STATUS_OFFSET);
+ amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
+ CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK,
+ MMIO_STATUS_GALOG_OVERFLOW_MASK);
+}
- iommu_feature_enable(iommu, CONTROL_GAINT_EN);
- iommu_feature_enable(iommu, CONTROL_GALOG_EN);
+/*
+ * This function restarts PPR logging in case the IOMMU experienced
+ * a PPR log overflow.
+ */
+void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
+{
+ amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
+ CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
+ MMIO_STATUS_PPR_OVERFLOW_MASK);
}
/*
@@ -906,6 +934,8 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
if (iommu->ppr_log == NULL)
return;
+ iommu_feature_enable(iommu, CONTROL_PPR_EN);
+
entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
@@ -916,7 +946,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
- iommu_feature_enable(iommu, CONTROL_PPR_EN);
+ iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
}
static void __init free_ppr_log(struct amd_iommu *iommu)
@@ -2311,6 +2341,7 @@ static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq
struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
irqd->chip = &intcapxt_controller;
+ irqd->hwirq = info->hwirq;
irqd->chip_data = info->data;
__irq_set_handler(i, handle_edge_irq, 0, "edge");
}
@@ -2337,22 +2368,14 @@ static void intcapxt_unmask_irq(struct irq_data *irqd)
xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
xt.destid_24_31 = cfg->dest_apicid >> 24;
- /**
- * Current IOMMU implementation uses the same IRQ for all
- * 3 IOMMU interrupts.
- */
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+ writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
}
static void intcapxt_mask_irq(struct irq_data *irqd)
{
struct amd_iommu *iommu = irqd->chip_data;
- writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
- writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
- writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+ writeq(0, iommu->mmio_base + irqd->hwirq);
}
@@ -2415,7 +2438,8 @@ static struct irq_domain *iommu_get_irqdomain(void)
return iommu_irqdomain;
}
-static int iommu_setup_intcapxt(struct amd_iommu *iommu)
+static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
+ int hwirq, irq_handler_t thread_fn)
{
struct irq_domain *domain;
struct irq_alloc_info info;
@@ -2429,6 +2453,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
init_irq_alloc_info(&info, NULL);
info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
info.data = iommu;
+ info.hwirq = hwirq;
irq = irq_domain_alloc_irqs(domain, 1, node, &info);
if (irq < 0) {
@@ -2437,7 +2462,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
}
ret = request_threaded_irq(irq, amd_iommu_int_handler,
- amd_iommu_int_thread, 0, "AMD-Vi", iommu);
+ thread_fn, 0, devname, iommu);
if (ret) {
irq_domain_free_irqs(irq, 1);
irq_domain_remove(domain);
@@ -2447,6 +2472,37 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
return 0;
}
+static int iommu_setup_intcapxt(struct amd_iommu *iommu)
+{
+ int ret;
+
+ snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
+ "AMD-Vi%d-Evt", iommu->index);
+ ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
+ MMIO_INTCAPXT_EVT_OFFSET,
+ amd_iommu_int_thread_evtlog);
+ if (ret)
+ return ret;
+
+ snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
+ "AMD-Vi%d-PPR", iommu->index);
+ ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
+ MMIO_INTCAPXT_PPR_OFFSET,
+ amd_iommu_int_thread_pprlog);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_IRQ_REMAP
+ snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
+ "AMD-Vi%d-GA", iommu->index);
+ ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
+ MMIO_INTCAPXT_GALOG_OFFSET,
+ amd_iommu_int_thread_galog);
+#endif
+
+ return ret;
+}
+
static int iommu_init_irq(struct amd_iommu *iommu)
{
int ret;
@@ -2472,8 +2528,6 @@ enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
- if (iommu->ppr_log != NULL)
- iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
return 0;
}
@@ -2889,8 +2943,6 @@ static void enable_iommus_vapic(void)
static void enable_iommus(void)
{
early_enable_iommus();
- enable_iommus_vapic();
- enable_iommus_v2();
}
static void disable_iommus(void)
@@ -3154,6 +3206,13 @@ static int amd_iommu_enable_interrupts(void)
goto out;
}
+ /*
+ * The interrupt handler is ready to process interrupts. Enable the
+ * PPR and GA log interrupts for all IOMMUs.
+ */
+ enable_iommus_vapic();
+ enable_iommus_v2();
+
out:
return ret;
}
@@ -3233,8 +3292,6 @@ static int __init state_next(void)
register_syscore_ops(&amd_iommu_syscore_ops);
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
- enable_iommus_vapic();
- enable_iommus_v2();
break;
case IOMMU_PCI_INIT:
ret = amd_iommu_enable_interrupts();
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 7d7d3799199a..95bd7c25ba6f 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -841,50 +841,27 @@ static inline void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */
-#define AMD_IOMMU_INT_MASK \
- (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
- MMIO_STATUS_EVT_INT_MASK | \
- MMIO_STATUS_PPR_INT_MASK | \
- MMIO_STATUS_GALOG_OVERFLOW_MASK | \
- MMIO_STATUS_GALOG_INT_MASK)
-
-irqreturn_t amd_iommu_int_thread(int irq, void *data)
+static void amd_iommu_handle_irq(void *data, const char *evt_type,
+ u32 int_mask, u32 overflow_mask,
+ void (*int_handler)(struct amd_iommu *),
+ void (*overflow_handler)(struct amd_iommu *))
{
struct amd_iommu *iommu = (struct amd_iommu *) data;
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ u32 mask = int_mask | overflow_mask;
- while (status & AMD_IOMMU_INT_MASK) {
+ while (status & mask) {
/* Enable interrupt sources again */
- writel(AMD_IOMMU_INT_MASK,
- iommu->mmio_base + MMIO_STATUS_OFFSET);
+ writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
- if (status & MMIO_STATUS_EVT_INT_MASK) {
- pr_devel("Processing IOMMU Event Log\n");
- iommu_poll_events(iommu);
+ if (int_handler) {
+ pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
+ iommu->index, evt_type);
+ int_handler(iommu);
}
- if (status & MMIO_STATUS_PPR_INT_MASK) {
- pr_devel("Processing IOMMU PPR Log\n");
- iommu_poll_ppr_log(iommu);
- }
-
-#ifdef CONFIG_IRQ_REMAP
- if (status & (MMIO_STATUS_GALOG_INT_MASK |
- MMIO_STATUS_GALOG_OVERFLOW_MASK)) {
- pr_devel("Processing IOMMU GA Log\n");
- iommu_poll_ga_log(iommu);
- }
-
- if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) {
- pr_info_ratelimited("IOMMU GA Log overflow\n");
- amd_iommu_restart_ga_log(iommu);
- }
-#endif
-
- if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
- pr_info_ratelimited("IOMMU event log overflow\n");
- amd_iommu_restart_event_logging(iommu);
- }
+ if ((status & overflow_mask) && overflow_handler)
+ overflow_handler(iommu);
/*
* Hardware bug: ERBT1312
@@ -901,6 +878,43 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
*/
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
}
+}
+
+irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
+{
+ amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
+ MMIO_STATUS_EVT_OVERFLOW_MASK,
+ iommu_poll_events, amd_iommu_restart_event_logging);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
+{
+ amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
+ MMIO_STATUS_PPR_OVERFLOW_MASK,
+ iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
+{
+#ifdef CONFIG_IRQ_REMAP
+ amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
+ MMIO_STATUS_GALOG_OVERFLOW_MASK,
+ iommu_poll_ga_log, amd_iommu_restart_ga_log);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_thread(int irq, void *data)
+{
+ amd_iommu_int_thread_evtlog(irq, data);
+ amd_iommu_int_thread_pprlog(irq, data);
+ amd_iommu_int_thread_galog(irq, data);
+
return IRQ_HANDLED;
}
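
The refactoring above funnels all three log interrupts through one parameterised loop: acknowledge the source, drain the log, restart it on overflow, then re-read the status in case the hardware raised the bits again (the ERBT1312 erratum). A standalone sketch of that control flow, not part of the patch; the status register is simulated with a plain variable:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_status;

static void poll_log(void)        { printf("  draining log\n"); fake_status = 0; }
static void handle_overflow(void) { printf("  restarting log\n"); }

static void handle_irq(const char *name, uint32_t int_mask, uint32_t ovf_mask,
		       void (*poll)(void), void (*overflow)(void))
{
	uint32_t mask = int_mask | ovf_mask;
	uint32_t status = fake_status;

	while (status & mask) {
		/* acknowledge the interrupt source (write-1-to-clear in hardware) */
		fake_status &= ~mask;

		printf("%s log interrupt (status 0x%x)\n", name, status);
		if (status & int_mask)
			poll();
		if (status & ovf_mask)
			overflow();

		/* hardware may have raised the bits again in the meantime */
		status = fake_status;
	}
}

int main(void)
{
	fake_status = 0x3;	/* pretend both "pending" and "overflow" are set */
	handle_irq("Evt", 0x2, 0x1, poll_log, handle_overflow);
	return 0;
}
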
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 2596466cd5a6..57c2fb1146e2 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -262,8 +262,8 @@ static void put_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
- refcount_dec(&pasid_state->count);
- wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
+ if (!refcount_dec_and_test(&pasid_state->count))
+ wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
free_pasid_state(pasid_state);
}
@@ -327,6 +327,9 @@ static void free_pasid_states(struct device_state *dev_state)
put_pasid_state(pasid_state);
+ /* Clear the pasid state so that the pasid can be re-used */
+ clear_pasid_state(dev_state, pasid_state->pasid);
+
/*
* This will call the mn_release function and
* unbind the PASID
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 8af64b57f048..0b8927508427 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -671,8 +671,7 @@ static int apple_dart_attach_dev(struct iommu_domain *domain,
return ret;
switch (domain->type) {
- case IOMMU_DOMAIN_DMA:
- case IOMMU_DOMAIN_UNMANAGED:
+ default:
ret = apple_dart_domain_add_streams(dart_domain, cfg);
if (ret)
return ret;
@@ -1276,7 +1275,7 @@ static __maybe_unused int apple_dart_resume(struct device *dev)
return 0;
}
-DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
static const struct of_device_id apple_dart_of_match[] = {
{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index dbc812a0e57e..8a16cd3ef487 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -80,7 +80,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
* be some overlap between use of both ASIDs, until we invalidate the
* TLB.
*/
- arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
+ arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
/* Invalidate TLB entries previously associated with that context */
arm_smmu_tlb_inv_asid(smmu, asid);
@@ -186,6 +186,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
}
}
+/*
+ * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this is
+ * the threshold above which per-page TLBI commands queued for a range are
+ * replaced with a single address-space TLBI command. An SMMU without the
+ * range invalidation feature that had to issue that many per-page commands
+ * would otherwise trigger a soft lockup.
+ */
+#define CMDQ_MAX_TLBI_OPS (1 << (PAGE_SHIFT - 3))
+
static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
@@ -201,8 +210,13 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
* range. So do a simple translation here by calculating size correctly.
*/
size = end - start;
- if (size == ULONG_MAX)
- size = 0;
+ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
+ if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
+ size = 0;
+ } else {
+ if (size == ULONG_MAX)
+ size = 0;
+ }
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
if (!size)
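
For an SMMU without ARM_SMMU_FEAT_RANGE_INV, the hunk above caps how many per-page invalidations the MMU-notifier path will queue: with 4 KiB pages (PAGE_SHIFT = 12), CMDQ_MAX_TLBI_OPS is 1 << 9 = 512, so any range of 512 * 4 KiB = 2 MiB or more collapses to a full-ASID flush (size = 0). A small standalone sketch of that decision; the constants below are assumed for illustration, not taken from kernel headers:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT              12                      /* 4 KiB pages (assumed) */
#define PAGE_SIZE               (1UL << PAGE_SHIFT)
#define CMDQ_MAX_TLBI_OPS       (1UL << (PAGE_SHIFT - 3))       /* 512 ops */

/* Returns 0 when the range should be demoted to a full-ASID flush,
 * mirroring the "size = 0" convention used in the patch. */
static size_t tlbi_size(size_t size, int have_range_inv)
{
        if (!have_range_inv) {
                if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
                        return 0;
        } else if (size == (size_t)-1) {        /* stands in for ULONG_MAX */
                return 0;
        }
        return size;
}

int main(void)
{
        printf("1 MiB, no range inv -> %zu\n", tlbi_size(1UL << 20, 0)); /* kept as-is */
        printf("4 MiB, no range inv -> %zu\n", tlbi_size(4UL << 20, 0)); /* 0: full flush */
        printf("4 MiB, range inv    -> %zu\n", tlbi_size(4UL << 20, 1)); /* kept as-is */
        return 0;
}
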
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 9b0dc3505601..bd0a596f9863 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1059,7 +1059,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
/*
* This function handles the following cases:
*
- * (1) Install primary CD, for normal DMA traffic (SSID = 0).
+ * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
* (2) Install a secondary CD, for SID+SSID traffic.
* (3) Update ASID of a CD. Atomically write the first 64 bits of the
* CD, then invalidate the old entry and mappings.
@@ -1607,7 +1607,7 @@ static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
sid = FIELD_GET(PRIQ_0_SID, evt[0]);
ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
- ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
+ ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : IOMMU_NO_PASID;
last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
@@ -1748,7 +1748,7 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
*/
*cmd = (struct arm_smmu_cmdq_ent) {
.opcode = CMDQ_OP_ATC_INV,
- .substream_valid = !!ssid,
+ .substream_valid = (ssid != IOMMU_NO_PASID),
.atc.ssid = ssid,
};
@@ -1795,7 +1795,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_cmdq_batch cmds;
- arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
+ arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd);
cmds.num = 0;
for (i = 0; i < master->num_streams; i++) {
@@ -1875,7 +1875,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
- arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
+ arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
}
static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
@@ -1895,18 +1895,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
/* Get the leaf page size */
tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+ num_pages = size >> tg;
+
/* Convert page size of 12,14,16 (log2) to 1,2,3 */
cmd->tlbi.tg = (tg - 10) / 2;
/*
- * Determine what level the granule is at. For non-leaf, io-pgtable
- * assumes .tlb_flush_walk can invalidate multiple levels at once,
- * so ignore the nominal last-level granule and leave TTL=0.
+ * Determine what level the granule is at. For non-leaf, both
+ * io-pgtable and SVA pass a nominal last-level granule because
+ * they don't know what level(s) actually apply, so ignore that
+ * and leave TTL=0. However for various errata reasons we still
+ * want to use a range command, so avoid the SVA corner case
+ * where both scale and num could be 0 as well.
*/
if (cmd->tlbi.leaf)
cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
-
- num_pages = size >> tg;
+ else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
+ num_pages++;
}
cmds.num = 0;
@@ -1968,7 +1973,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
* Unfortunately, this can't be leaf-only since we may have
* zapped an entire table.
*/
- arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
+ arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
}
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
@@ -2055,24 +2060,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
return &smmu_domain->domain;
}
-static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
-{
- int idx, size = 1 << span;
-
- do {
- idx = find_first_zero_bit(map, size);
- if (idx == size)
- return -ENOSPC;
- } while (test_and_set_bit(idx, map));
-
- return idx;
-}
-
-static void arm_smmu_bitmap_free(unsigned long *map, int idx)
-{
- clear_bit(idx, map);
-}
-
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2093,7 +2080,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
} else {
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
if (cfg->vmid)
- arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
+ ida_free(&smmu->vmid_map, cfg->vmid);
}
kfree(smmu_domain);
@@ -2142,7 +2129,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
* the master has been added to the devices list for this domain.
* This isn't an issue because the STE hasn't been installed yet.
*/
- ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
+ ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd);
if (ret)
goto out_free_cd_tables;
@@ -2167,7 +2154,9 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
- vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
+ /* Reserve VMID 0 for stage-2 bypass STEs */
+ vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
+ GFP_KERNEL);
if (vmid < 0)
return vmid;
@@ -2328,7 +2317,7 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
pdev = to_pci_dev(master->dev);
atomic_inc(&smmu_domain->nr_ats_masters);
- arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
+ arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
if (pci_enable_ats(pdev, stu))
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
@@ -3098,8 +3087,8 @@ static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
reg |= STRTAB_BASE_RA;
smmu->strtab_cfg.strtab_base = reg;
- /* Allocate the first VMID for stage-2 bypass STEs */
- set_bit(0, smmu->vmid_map);
+ ida_init(&smmu->vmid_map);
+
return 0;
}
@@ -3923,6 +3912,7 @@ static void arm_smmu_device_remove(struct platform_device *pdev)
iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu);
iopf_queue_free(smmu->evtq.iopf);
+ ida_destroy(&smmu->vmid_map);
}
static void arm_smmu_device_shutdown(struct platform_device *pdev)
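
The VMID allocator change swaps the open-coded bitmap for an IDA and reserves VMID 0 for stage-2 bypass simply by allocating from the range [1, (1 << vmid_bits) - 1] instead of pre-setting bit 0. A rough userspace analogue of that "never hand out 0" policy, using a plain array because the kernel IDA API is not available here (sizes and names are made up):

#include <stdio.h>
#include <string.h>

#define NR_VMIDS 16     /* illustrative; real hardware has up to 1 << vmid_bits */

static unsigned char used[NR_VMIDS];

/* Allocate the lowest free ID in [lo, hi], or -1 if the space is full.
 * Passing lo = 1 keeps ID 0 reserved for bypass, like the patch does
 * with ida_alloc_range(&smmu->vmid_map, 1, ..., GFP_KERNEL). */
static int id_alloc_range(unsigned int lo, unsigned int hi)
{
        for (unsigned int i = lo; i <= hi && i < NR_VMIDS; i++) {
                if (!used[i]) {
                        used[i] = 1;
                        return (int)i;
                }
        }
        return -1;
}

static void id_free(int id)
{
        if (id > 0 && id < NR_VMIDS)
                used[id] = 0;
}

int main(void)
{
        int a = id_alloc_range(1, NR_VMIDS - 1);
        int b = id_alloc_range(1, NR_VMIDS - 1);

        printf("first VMID = %d, second VMID = %d (0 is never returned)\n", a, b);
        id_free(a);
        printf("after free, next VMID = %d\n", id_alloc_range(1, NR_VMIDS - 1));
        return 0;
}
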
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index dcab85698a4e..9915850dd4db 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -670,7 +670,7 @@ struct arm_smmu_device {
#define ARM_SMMU_MAX_VMIDS (1 << 16)
unsigned int vmid_bits;
- DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);
+ struct ida vmid_map;
unsigned int ssid_bits;
unsigned int sid_bits;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index b5b14108e086..bb89d49adf8d 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -3,7 +3,7 @@
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
-#include <linux/of_device.h>
+#include <linux/device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/ratelimit.h>
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index c71afda79d64..7f52ac67495f 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -251,10 +251,12 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sc8280xp-mdss" },
- { .compatible = "qcom,sm8150-mdss" },
- { .compatible = "qcom,sm8250-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
+ { .compatible = "qcom,sm6350-mdss" },
+ { .compatible = "qcom,sm6375-mdss" },
+ { .compatible = "qcom,sm8150-mdss" },
+ { .compatible = "qcom,sm8250-mdss" },
{ }
};
@@ -528,6 +530,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index a86acd76c1df..d6d1a2a55cc0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index a503ed758ec3..775a3cbaff4e 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -22,8 +22,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -51,14 +50,15 @@ struct qcom_iommu_dev {
struct clk_bulk_data clks[CLK_NUM];
void __iomem *local_base;
u32 sec_id;
- u8 num_ctxs;
- struct qcom_iommu_ctx *ctxs[]; /* indexed by asid-1 */
+ u8 max_asid;
+ struct qcom_iommu_ctx *ctxs[]; /* indexed by asid */
};
struct qcom_iommu_ctx {
struct device *dev;
void __iomem *base;
bool secure_init;
+ bool secured_ctx;
u8 asid; /* asid and ctx bank # are 1:1 */
struct iommu_domain *domain;
};
@@ -94,7 +94,7 @@ static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid
struct qcom_iommu_dev *qcom_iommu = d->iommu;
if (!qcom_iommu)
return NULL;
- return qcom_iommu->ctxs[asid - 1];
+ return qcom_iommu->ctxs[asid];
}
static inline void
@@ -273,6 +273,19 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
ctx->secure_init = true;
}
+ /* Secured QSMMU-500/QSMMU-v2 contexts cannot be programmed */
+ if (ctx->secured_ctx) {
+ ctx->domain = domain;
+ continue;
+ }
+
+ /* Disable context bank before programming */
+ iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
+
+ /* Clear context bank fault address fault status registers */
+ iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
+ iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
+
/* TTBRs */
iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
@@ -527,11 +540,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
qcom_iommu = platform_get_drvdata(iommu_pdev);
/* make sure the asid specified in dt is valid, so we don't have
- * to sanity check this elsewhere, since 'asid - 1' is used to
- * index into qcom_iommu->ctxs:
+ * to sanity check this elsewhere:
*/
- if (WARN_ON(asid < 1) ||
- WARN_ON(asid > qcom_iommu->num_ctxs)) {
+ if (WARN_ON(asid > qcom_iommu->max_asid) ||
+ WARN_ON(qcom_iommu->ctxs[asid] == NULL)) {
put_device(&iommu_pdev->dev);
return -EINVAL;
}
@@ -617,7 +629,8 @@ free_mem:
static int get_asid(const struct device_node *np)
{
- u32 reg;
+ u32 reg, val;
+ int asid;
/* read the "reg" property directly to get the relative address
* of the context bank, and calculate the asid from that:
@@ -625,7 +638,17 @@ static int get_asid(const struct device_node *np)
if (of_property_read_u32_index(np, "reg", 0, &reg))
return -ENODEV;
- return reg / 0x1000; /* context banks are 0x1000 apart */
+ /*
+ * Context banks are 0x1000 apart but, in some cases, the ASID
+ * number doesn't match this layout and needs to be passed
+ * explicitly from the DT configuration.
+ */
+ if (!of_property_read_u32(np, "qcom,ctx-asid", &val))
+ asid = val;
+ else
+ asid = reg / 0x1000;
+
+ return asid;
}
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
@@ -633,7 +656,6 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
struct qcom_iommu_ctx *ctx;
struct device *dev = &pdev->dev;
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
- struct resource *res;
int ret, irq;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
@@ -643,19 +665,22 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
ctx->dev = dev;
platform_set_drvdata(pdev, ctx);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base))
return PTR_ERR(ctx->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENODEV;
+ return irq;
+
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-iommu-v2-sec"))
+ ctx->secured_ctx = true;
/* clear IRQs before registering fault handler, just in case the
* boot-loader left us a surprise:
*/
- iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));
+ if (!ctx->secured_ctx)
+ iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));
ret = devm_request_irq(dev, irq,
qcom_iommu_fault,
@@ -677,7 +702,7 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
dev_dbg(dev, "found asid %u\n", ctx->asid);
- qcom_iommu->ctxs[ctx->asid - 1] = ctx;
+ qcom_iommu->ctxs[ctx->asid] = ctx;
return 0;
}
@@ -689,12 +714,14 @@ static void qcom_iommu_ctx_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- qcom_iommu->ctxs[ctx->asid - 1] = NULL;
+ qcom_iommu->ctxs[ctx->asid] = NULL;
}
static const struct of_device_id ctx_of_match[] = {
{ .compatible = "qcom,msm-iommu-v1-ns" },
{ .compatible = "qcom,msm-iommu-v1-sec" },
+ { .compatible = "qcom,msm-iommu-v2-ns" },
+ { .compatible = "qcom,msm-iommu-v2-sec" },
{ /* sentinel */ }
};
@@ -712,7 +739,8 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
struct device_node *child;
for_each_child_of_node(qcom_iommu->dev->of_node, child) {
- if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
+ if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec") ||
+ of_device_is_compatible(child, "qcom,msm-iommu-v2-sec")) {
of_node_put(child);
return true;
}
@@ -736,11 +764,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
for_each_child_of_node(dev->of_node, child)
max_asid = max(max_asid, get_asid(child));
- qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
+ qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid + 1),
GFP_KERNEL);
if (!qcom_iommu)
return -ENOMEM;
- qcom_iommu->num_ctxs = max_asid;
+ qcom_iommu->max_asid = max_asid;
qcom_iommu->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -856,6 +884,7 @@ static const struct dev_pm_ops qcom_iommu_pm_ops = {
static const struct of_device_id qcom_iommu_of_match[] = {
{ .compatible = "qcom,msm-iommu-v1" },
+ { .compatible = "qcom,msm-iommu-v2" },
{ /* sentinel */ }
};
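
After this change ctxs[] is indexed by the ASID itself rather than asid - 1, and the ASID comes either from the context-bank register offset (banks 0x1000 apart) or from the explicit "qcom,ctx-asid" property when the offset-derived number does not apply. A tiny standalone sketch of that lookup; the dt_asid parameter stands in for the DT property and all values are invented:

#include <stdio.h>

/* Derive an ASID the way get_asid() in the patch does: prefer an
 * explicit override (dt_asid stands in for the "qcom,ctx-asid"
 * property), otherwise fall back to the context bank offset
 * divided by 0x1000. */
static int get_asid(unsigned int reg_offset, int dt_asid)
{
        if (dt_asid >= 0)               /* property present */
                return dt_asid;
        return (int)(reg_offset / 0x1000);
}

int main(void)
{
        /* ctxs[] is now indexed by the ASID itself, so max_asid + 1 slots. */
        const char *ctxs[8] = { 0 };

        int a = get_asid(0x2000, -1);   /* no override: bank 2 -> ASID 2 */
        int b = get_asid(0x3000, 5);    /* override wins: ASID 5 */

        ctxs[a] = "ctx from register offset";
        ctxs[b] = "ctx from qcom,ctx-asid";

        printf("asid %d -> %s\n", a, ctxs[a]);
        printf("asid %d -> %s\n", b, ctxs[b]);
        return 0;
}
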
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index e57724163835..4b1a88f514c9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -660,7 +660,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
- unsigned long shift, iova_len, iova = 0;
+ unsigned long shift, iova_len, iova;
if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
cookie->msi_iova += size;
@@ -675,15 +675,29 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
- /* Try to get PCI devices a SAC address */
- if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
+ /*
+ * Try to use all the 32-bit PCI addresses first. The original SAC vs.
+ * DAC reasoning loses relevance with PCIe, but enough hardware and
+ * firmware bugs are still lurking out there that it's safest not to
+ * venture into the 64-bit space until necessary.
+ *
+ * If your device goes wrong after seeing the notice then likely either
+ * its driver is not setting DMA masks accurately, the hardware has
+ * some inherent bug in handling >32-bit addresses, or not all the
+ * expected address bits are wired up between the device and the IOMMU.
+ */
+ if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
iova = alloc_iova_fast(iovad, iova_len,
DMA_BIT_MASK(32) >> shift, false);
+ if (iova)
+ goto done;
- if (!iova)
- iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
- true);
+ dev->iommu->pci_32bit_workaround = false;
+ dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
+ }
+ iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
+done:
return (dma_addr_t)iova << shift;
}
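
The IOVA path above now tries the 32-bit space once per device and permanently clears the workaround flag on the first failure, so later allocations go straight to the full DMA mask. A minimal sketch of that "try small, remember it failed" shape, with a stub standing in for alloc_iova_fast() (fake_alloc, dev_state and the limits are invented for illustration):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MASK_32BIT      0xffffffffULL

/* Stand-in for alloc_iova_fast(): pretend the 32-bit space is exhausted. */
static uint64_t fake_alloc(uint64_t limit)
{
        return limit <= MASK_32BIT ? 0 : 0x100000000ULL;        /* 0 == failure */
}

struct dev_state {
        bool pci_32bit_workaround;      /* mirrors dev->iommu->pci_32bit_workaround */
};

static uint64_t alloc_iova(struct dev_state *dev, uint64_t dma_limit)
{
        uint64_t iova;

        if (dma_limit > MASK_32BIT && dev->pci_32bit_workaround) {
                iova = fake_alloc(MASK_32BIT);
                if (iova)
                        return iova;
                /* First failure: stop trying 32-bit for this device. */
                dev->pci_32bit_workaround = false;
                printf("switching to full-width DMA addresses\n");
        }
        return fake_alloc(dma_limit);
}

int main(void)
{
        struct dev_state dev = { .pci_32bit_workaround = true };

        printf("first alloc:  %#llx\n", (unsigned long long)alloc_iova(&dev, UINT64_MAX));
        printf("second alloc: %#llx\n", (unsigned long long)alloc_iova(&dev, UINT64_MAX));
        return 0;
}
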
diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h
index 942790009292..c829f1f82a99 100644
--- a/drivers/iommu/dma-iommu.h
+++ b/drivers/iommu/dma-iommu.h
@@ -17,6 +17,10 @@ int iommu_dma_init_fq(struct iommu_domain *domain);
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
extern bool iommu_dma_forcedac;
+static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
+{
+ dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
+}
#else /* CONFIG_IOMMU_DMA */
@@ -38,5 +42,9 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
{
}
+static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
+{
+}
+
#endif /* CONFIG_IOMMU_DMA */
#endif /* __DMA_IOMMU_H */
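
The header keeps the usual kernel pattern: a real inline when CONFIG_IOMMU_DMA is enabled and an empty stub otherwise, so the probe-path caller never needs an #ifdef. A self-contained illustration of that idiom, with an invented FEATURE_FOO macro and helper name in place of the real config option:

#include <stdio.h>

/* Build with -DFEATURE_FOO to get the real helper, without it to get
 * the no-op stub; the call site stays identical either way. */
#ifdef FEATURE_FOO
static inline void foo_apply_workaround(int *dev_flag)
{
        *dev_flag = 1;
}
#else
static inline void foo_apply_workaround(int *dev_flag)
{
        (void)dev_flag;         /* stub: does nothing */
}
#endif

int main(void)
{
        int flag = 0;

        foo_apply_workaround(&flag);    /* no #ifdef needed here */
        printf("flag = %d\n", flag);
        return 0;
}
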
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 9e6f78830ece..3685ba90ec88 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -114,13 +114,17 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
-static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
+static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
{
return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
+static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
+{
+ return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
+}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
- return mm_to_dma_pfn(page_to_pfn(pg));
+ return mm_to_dma_pfn_start(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
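
Splitting mm_to_dma_pfn() into _start and _end variants matters when the MM page is bigger than the 4 KiB VT-d page: one MM pfn then covers a run of VT-d pfns, and the end of a region must point at the last of them rather than at the start of the next MM page. A worked example assuming 16 KiB MM pages (a shift of 2); on x86 the shift is 0 and both helpers return the same value:

#include <stdio.h>

#define MM_PAGE_SHIFT   14      /* 16 KiB MM pages (assumed for illustration) */
#define VTD_PAGE_SHIFT  12      /* 4 KiB IOMMU pages */
#define SHIFT           (MM_PAGE_SHIFT - VTD_PAGE_SHIFT)

static unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
{
        return mm_pfn << SHIFT;
}

static unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
{
        return ((mm_pfn + 1) << SHIFT) - 1;
}

int main(void)
{
        unsigned long mm_pfn = 5;

        /* MM pfn 5 covers VT-d pfns 20..23; a single start-only helper
         * would have produced 20 for both ends of the range. */
        printf("mm pfn %lu -> dma pfns %lu..%lu\n", mm_pfn,
               mm_to_dma_pfn_start(mm_pfn), mm_to_dma_pfn_end(mm_pfn));
        return 0;
}
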
@@ -878,7 +882,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
}
/* For request-without-pasid, get the pasid from context entry */
if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID)
- pasid = PASID_RID2PASID;
+ pasid = IOMMU_NO_PASID;
dir_index = pasid >> PASID_PDE_SHIFT;
pde = &dir[dir_index];
@@ -1360,6 +1364,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
static void domain_update_iotlb(struct dmar_domain *domain)
{
+ struct dev_pasid_info *dev_pasid;
struct device_domain_info *info;
bool has_iotlb_device = false;
unsigned long flags;
@@ -1371,6 +1376,14 @@ static void domain_update_iotlb(struct dmar_domain *domain)
break;
}
}
+
+ list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
+ info = dev_iommu_priv_get(dev_pasid->dev);
+ if (info->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
domain->has_iotlb_device = has_iotlb_device;
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -1450,12 +1463,13 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
qdep = info->ats_qdep;
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, addr, mask);
- quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
+ quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
+ struct dev_pasid_info *dev_pasid;
struct device_domain_info *info;
unsigned long flags;
@@ -1465,6 +1479,36 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
+
+ list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
+ info = dev_iommu_priv_get(dev_pasid->dev);
+
+ if (!info->ats_enabled)
+ continue;
+
+ qi_flush_dev_iotlb_pasid(info->iommu,
+ PCI_DEVID(info->bus, info->devfn),
+ info->pfsid, dev_pasid->pasid,
+ info->ats_qdep, addr,
+ mask);
+ }
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
+ struct dmar_domain *domain, u64 addr,
+ unsigned long npages, bool ih)
+{
+ u16 did = domain_id_iommu(domain, iommu);
+ struct dev_pasid_info *dev_pasid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
+ qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
+
+ if (!list_empty(&domain->devices))
+ qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -1485,7 +1529,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
ih = 1 << 6;
if (domain->use_first_level) {
- qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
+ domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
} else {
unsigned long bitmask = aligned_pages - 1;
@@ -1555,7 +1599,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
u16 did = domain_id_iommu(dmar_domain, iommu);
if (dmar_domain->use_first_level)
- qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
+ domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
else
iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH);
@@ -1727,6 +1771,7 @@ static struct dmar_domain *alloc_domain(unsigned int type)
domain->use_first_level = true;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ INIT_LIST_HEAD(&domain->dev_pasids);
spin_lock_init(&domain->lock);
xa_init(&domain->iommu_array);
@@ -1941,7 +1986,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_pdts(pds);
/* Setup the RID_PASID field: */
- context_set_sm_rid2pasid(context, PASID_RID2PASID);
+ context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
/*
* Setup the Device-TLB enable bit and Page request
@@ -2363,8 +2408,8 @@ static int __init si_domain_init(int hw)
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
ret = iommu_domain_identity_map(si_domain,
- mm_to_dma_pfn(start_pfn),
- mm_to_dma_pfn(end_pfn));
+ mm_to_dma_pfn_start(start_pfn),
+ mm_to_dma_pfn_end(end_pfn));
if (ret)
return ret;
}
@@ -2385,8 +2430,8 @@ static int __init si_domain_init(int hw)
continue;
ret = iommu_domain_identity_map(si_domain,
- mm_to_dma_pfn(start >> PAGE_SHIFT),
- mm_to_dma_pfn(end >> PAGE_SHIFT));
+ mm_to_dma_pfn_start(start >> PAGE_SHIFT),
+ mm_to_dma_pfn_end(end >> PAGE_SHIFT));
if (ret)
return ret;
}
@@ -2421,13 +2466,13 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
/* Setup the PASID entry for requests without PASID: */
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
- dev, PASID_RID2PASID);
+ dev, IOMMU_NO_PASID);
else if (domain->use_first_level)
ret = domain_setup_first_level(iommu, domain, dev,
- PASID_RID2PASID);
+ IOMMU_NO_PASID);
else
ret = intel_pasid_setup_second_level(iommu, domain,
- dev, PASID_RID2PASID);
+ dev, IOMMU_NO_PASID);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
device_block_translation(dev);
@@ -2447,30 +2492,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return 0;
}
-static bool device_has_rmrr(struct device *dev)
-{
- struct dmar_rmrr_unit *rmrr;
- struct device *tmp;
- int i;
-
- rcu_read_lock();
- for_each_rmrr_units(rmrr) {
- /*
- * Return TRUE if this RMRR contains the device that
- * is passed in.
- */
- for_each_active_dev_scope(rmrr->devices,
- rmrr->devices_cnt, i, tmp)
- if (tmp == dev ||
- is_downstream_to_pci_bridge(dev, tmp)) {
- rcu_read_unlock();
- return true;
- }
- }
- rcu_read_unlock();
- return false;
-}
-
/**
* device_rmrr_is_relaxable - Test whether the RMRR of this device
* is relaxable (ie. is allowed to be not enforced under some conditions)
@@ -2501,34 +2522,6 @@ static bool device_rmrr_is_relaxable(struct device *dev)
}
/*
- * There are a couple cases where we need to restrict the functionality of
- * devices associated with RMRRs. The first is when evaluating a device for
- * identity mapping because problems exist when devices are moved in and out
- * of domains and their respective RMRR information is lost. This means that
- * a device with associated RMRRs will never be in a "passthrough" domain.
- * The second is use of the device through the IOMMU API. This interface
- * expects to have full control of the IOVA space for the device. We cannot
- * satisfy both the requirement that RMRR access is maintained and have an
- * unencumbered IOVA space. We also have no ability to quiesce the device's
- * use of the RMRR space or even inform the IOMMU API user of the restriction.
- * We therefore prevent devices associated with an RMRR from participating in
- * the IOMMU API, which eliminates them from device assignment.
- *
- * In both cases, devices which have relaxable RMRRs are not concerned by this
- * restriction. See device_rmrr_is_relaxable comment.
- */
-static bool device_is_rmrr_locked(struct device *dev)
-{
- if (!device_has_rmrr(dev))
- return false;
-
- if (device_rmrr_is_relaxable(dev))
- return false;
-
- return true;
-}
-
-/*
* Return the required default domain type for a specific device.
*
* @dev: the device in query
@@ -3005,13 +2998,6 @@ static int iommu_suspend(void)
struct intel_iommu *iommu = NULL;
unsigned long flag;
- for_each_active_iommu(iommu, drhd) {
- iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
- GFP_KERNEL);
- if (!iommu->iommu_state)
- goto nomem;
- }
-
iommu_flush_all();
for_each_active_iommu(iommu, drhd) {
@@ -3031,12 +3017,6 @@ static int iommu_suspend(void)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
return 0;
-
-nomem:
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
-
- return -ENOMEM;
}
static void iommu_resume(void)
@@ -3068,9 +3048,6 @@ static void iommu_resume(void)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
}
static struct syscore_ops iommu_syscore_ops = {
@@ -3561,8 +3538,8 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
unsigned long val, void *v)
{
struct memory_notify *mhp = v;
- unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
- unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
+ unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn);
+ unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn +
mhp->nr_pages - 1);
switch (val) {
@@ -3757,7 +3734,6 @@ static int __init probe_acpi_namespace_devices(void)
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev) {
struct acpi_device_physical_node *pn;
- struct iommu_group *group;
struct acpi_device *adev;
if (dev->bus != &acpi_bus_type)
@@ -3767,12 +3743,6 @@ static int __init probe_acpi_namespace_devices(void)
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn,
&adev->physical_node_list, node) {
- group = iommu_group_get(pn->dev);
- if (group) {
- iommu_group_put(group);
- continue;
- }
-
ret = iommu_probe_device(pn->dev);
if (ret)
break;
@@ -3969,7 +3939,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
- PASID_RID2PASID, false);
+ IOMMU_NO_PASID, false);
iommu_disable_pci_caps(info);
domain_context_clear(info);
@@ -3998,7 +3968,7 @@ static void device_block_translation(struct device *dev)
if (!dev_is_real_dma_subdevice(dev)) {
if (sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, dev,
- PASID_RID2PASID, false);
+ IOMMU_NO_PASID, false);
else
domain_context_clear(info);
}
@@ -4140,12 +4110,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device_domain_info *info = dev_iommu_priv_get(dev);
int ret;
- if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
- device_is_rmrr_locked(dev)) {
- dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
- return -EPERM;
- }
-
if (info->domain)
device_block_translation(dev);
@@ -4272,7 +4236,7 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
unsigned long i;
nrpages = aligned_nrpages(gather->start, size);
- start_pfn = mm_to_dma_pfn(iova_pfn);
+ start_pfn = mm_to_dma_pfn_start(iova_pfn);
xa_for_each(&dmar_domain->iommu_array, i, info)
iommu_flush_iotlb_psi(info->iommu, dmar_domain,
@@ -4332,7 +4296,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
list_for_each_entry(info, &domain->devices, link)
intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
- PASID_RID2PASID);
+ IOMMU_NO_PASID);
}
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
@@ -4714,23 +4678,96 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct dev_pasid_info *curr, *dev_pasid = NULL;
+ struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
+ unsigned long flags;
- /* Domain type specific cleanup: */
domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
- if (domain) {
- switch (domain->type) {
- case IOMMU_DOMAIN_SVA:
- intel_svm_remove_dev_pasid(dev, pasid);
- break;
- default:
- /* should never reach here */
- WARN_ON(1);
+ if (WARN_ON_ONCE(!domain))
+ goto out_tear_down;
+
+ /*
+ * The SVA implementation needs to handle its own stuff like the mm
+ * notification. Before that code is consolidated into the iommu core,
+ * let the Intel SVA code handle it.
+ */
+ if (domain->type == IOMMU_DOMAIN_SVA) {
+ intel_svm_remove_dev_pasid(dev, pasid);
+ goto out_tear_down;
+ }
+
+ dmar_domain = to_dmar_domain(domain);
+ spin_lock_irqsave(&dmar_domain->lock, flags);
+ list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
+ if (curr->dev == dev && curr->pasid == pasid) {
+ list_del(&curr->link_domain);
+ dev_pasid = curr;
break;
}
}
+ WARN_ON_ONCE(!dev_pasid);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ domain_detach_iommu(dmar_domain, iommu);
+ kfree(dev_pasid);
+out_tear_down:
intel_pasid_tear_down_entry(iommu, dev, pasid, false);
+ intel_drain_pasid_prq(dev, pasid);
+}
+
+static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct intel_iommu *iommu = info->iommu;
+ struct dev_pasid_info *dev_pasid;
+ unsigned long flags;
+ int ret;
+
+ if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
+ return -EOPNOTSUPP;
+
+ if (context_copied(iommu, info->bus, info->devfn))
+ return -EBUSY;
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
+ if (!dev_pasid)
+ return -ENOMEM;
+
+ ret = domain_attach_iommu(dmar_domain, iommu);
+ if (ret)
+ goto out_free;
+
+ if (domain_type_is_si(dmar_domain))
+ ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
+ dev, pasid);
+ else if (dmar_domain->use_first_level)
+ ret = domain_setup_first_level(iommu, dmar_domain,
+ dev, pasid);
+ else
+ ret = intel_pasid_setup_second_level(iommu, dmar_domain,
+ dev, pasid);
+ if (ret)
+ goto out_detach_iommu;
+
+ dev_pasid->dev = dev;
+ dev_pasid->pasid = pasid;
+ spin_lock_irqsave(&dmar_domain->lock, flags);
+ list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
+
+ return 0;
+out_detach_iommu:
+ domain_detach_iommu(dmar_domain, iommu);
+out_free:
+ kfree(dev_pasid);
+ return ret;
}
static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
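
intel_iommu_set_dev_pasid() above uses the usual goto-based unwind: each setup step that succeeds gains a matching label that undoes it, and a failure jumps to the label of the last successful step. A compact standalone sketch of the same structure with invented resources:

#include <stdio.h>
#include <stdlib.h>

struct thing { int attached; };

static int attach(struct thing *t) { t->attached = 1; return 0; }
static void detach(struct thing *t) { t->attached = 0; }

/* Pretend the final step fails so the unwind path is exercised. */
static int program_hw(void) { return -1; }

static int set_up(void)
{
        struct thing *t;
        int ret;

        t = calloc(1, sizeof(*t));
        if (!t)
                return -1;

        ret = attach(t);
        if (ret)
                goto out_free;

        ret = program_hw();
        if (ret)
                goto out_detach;        /* undo attach(), then the allocation */

        return 0;

out_detach:
        detach(t);
out_free:
        free(t);
        return ret;
}

int main(void)
{
        printf("set_up() = %d (hardware step failed, everything unwound)\n", set_up());
        return 0;
}
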
@@ -4770,6 +4807,7 @@ const struct iommu_ops intel_iommu_ops = {
#endif
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = intel_iommu_attach_device,
+ .set_dev_pasid = intel_iommu_set_dev_pasid,
.map_pages = intel_iommu_map_pages,
.unmap_pages = intel_iommu_unmap_pages,
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
@@ -5006,7 +5044,7 @@ void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
return;
sid = PCI_DEVID(info->bus, info->devfn);
- if (pasid == PASID_RID2PASID) {
+ if (pasid == IOMMU_NO_PASID) {
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, address, mask);
} else {
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 1c5e1d88862b..7dac94f62b4e 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -595,6 +595,7 @@ struct dmar_domain {
spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */
+ struct list_head dev_pasids; /* all attached pasids */
struct dma_pte *pgd; /* virtual address */
int gaw; /* max guest address width */
@@ -680,7 +681,7 @@ struct intel_iommu {
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[16];
struct q_inval *qi; /* Queued invalidation info */
- u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+ u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
@@ -717,6 +718,12 @@ struct device_domain_info {
struct pasid_table *pasid_table; /* pasid table */
};
+struct dev_pasid_info {
+ struct list_head link_domain; /* link to domain siblings */
+ struct device *dev;
+ ioasid_t pasid;
+};
+
static inline void __iommu_flush_cache(
struct intel_iommu *iommu, void *addr, int size)
{
@@ -844,6 +851,7 @@ int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
+void intel_drain_pasid_prq(struct device *dev, u32 pasid);
struct intel_svm_dev {
struct list_head list;
@@ -862,6 +870,7 @@ struct intel_svm {
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
+static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {}
static inline struct iommu_domain *intel_svm_domain_alloc(void)
{
return NULL;
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c5d479770e12..8f92b92f3d2a 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -129,7 +129,7 @@ int intel_pasid_alloc_table(struct device *dev)
info->pasid_table = pasid_table;
if (!ecap_coherent(info->iommu->ecap))
- clflush_cache_range(pasid_table->table, size);
+ clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);
return 0;
}
@@ -438,7 +438,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
* SVA usage, device could do DMA with multiple PASIDs. It is more
* efficient to flush devTLB specific to the PASID.
*/
- if (pasid == PASID_RID2PASID)
+ if (pasid == IOMMU_NO_PASID)
qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
else
qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
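
The clflush fix makes the flush length match what was actually allocated: a page-order allocation is 2^order contiguous pages, so the cache flush has to cover (1 << order) * PAGE_SIZE bytes rather than an independently computed size. The arithmetic, as a quick standalone check (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        /* A page-order allocation of order n is 2^n pages. */
        for (unsigned int order = 0; order <= 3; order++)
                printf("order %u -> %lu pages -> %lu bytes to flush\n",
                       order, 1UL << order, (1UL << order) * PAGE_SIZE);
        return 0;
}
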
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index d6b7d21244b1..4e9e68c3c388 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -10,8 +10,6 @@
#ifndef __INTEL_PASID_H
#define __INTEL_PASID_H
-#define PASID_RID2PASID 0x0
-#define PASID_MIN 0x1
#define PASID_MAX 0x100000
#define PASID_PTE_MASK 0x3F
#define PASID_PTE_PRESENT 1
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 8f6d68006ab6..50a481c895b8 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -26,8 +26,6 @@
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
-static void intel_svm_drain_prq(struct device *dev, u32 pasid);
-#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
static DEFINE_XARRAY_ALLOC(pasid_private_array);
static int pasid_private_add(ioasid_t pasid, void *priv)
@@ -259,8 +257,6 @@ static const struct mmu_notifier_ops intel_mmuops = {
.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
};
-static DEFINE_MUTEX(pasid_mutex);
-
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
struct intel_svm **rsvm,
struct intel_svm_dev **rsdev)
@@ -268,10 +264,6 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
struct intel_svm_dev *sdev = NULL;
struct intel_svm *svm;
- /* The caller should hold the pasid_mutex lock */
- if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
- return -EINVAL;
-
if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
return -EINVAL;
@@ -371,37 +363,23 @@ free_svm:
return ret;
}
-/* Caller must hold pasid_mutex */
-static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
+void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
{
struct intel_svm_dev *sdev;
struct intel_iommu *iommu;
struct intel_svm *svm;
struct mm_struct *mm;
- int ret = -EINVAL;
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
- goto out;
+ return;
- ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
- if (ret)
- goto out;
+ if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
+ return;
mm = svm->mm;
if (sdev) {
list_del_rcu(&sdev->list);
- /*
- * Flush the PASID cache and IOTLB for this device.
- * Note that we do depend on the hardware *not* using
- * the PASID any more. Just as we depend on other
- * devices never using PASIDs that they have no right
- * to use. We have a *shared* PASID table, because it's
- * large and has to be physically contiguous. So it's
- * hard to be as defensive as we might like.
- */
- intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);
- intel_svm_drain_prq(dev, svm->pasid);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
@@ -418,8 +396,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree(svm);
}
}
-out:
- return ret;
}
/* Page request queue descriptor */
@@ -460,7 +436,7 @@ static bool is_canonical_address(u64 addr)
}
/**
- * intel_svm_drain_prq - Drain page requests and responses for a pasid
+ * intel_drain_pasid_prq - Drain page requests and responses for a pasid
* @dev: target device
* @pasid: pasid for draining
*
@@ -474,7 +450,7 @@ static bool is_canonical_address(u64 addr)
* described in VT-d spec CH7.10 to drain all page requests and page
* responses pending in the hardware.
*/
-static void intel_svm_drain_prq(struct device *dev, u32 pasid)
+void intel_drain_pasid_prq(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct dmar_domain *domain;
@@ -520,19 +496,7 @@ prq_retry:
goto prq_retry;
}
- /*
- * A work in IO page fault workqueue may try to lock pasid_mutex now.
- * Holding pasid_mutex while waiting in iopf_queue_flush_dev() for
- * all works in the workqueue to finish may cause deadlock.
- *
- * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
- * Unlock it to allow the works to be handled while waiting for
- * them to finish.
- */
- lockdep_assert_held(&pasid_mutex);
- mutex_unlock(&pasid_mutex);
iopf_queue_flush_dev(dev);
- mutex_lock(&pasid_mutex);
/*
* Perform steps described in VT-d spec CH7.10 to drain page
@@ -827,26 +791,14 @@ out:
return ret;
}
-void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
-{
- mutex_lock(&pasid_mutex);
- intel_svm_unbind_mm(dev, pasid);
- mutex_unlock(&pasid_mutex);
-}
-
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
- int ret;
- mutex_lock(&pasid_mutex);
- ret = intel_svm_bind_mm(iommu, dev, mm);
- mutex_unlock(&pasid_mutex);
-
- return ret;
+ return intel_svm_bind_mm(iommu, dev, mm);
}
static void intel_svm_domain_free(struct iommu_domain *domain)
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 05c0fb2acbc4..b78671a8a914 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -10,34 +10,30 @@
#include "iommu-sva.h"
static DEFINE_MUTEX(iommu_sva_lock);
-static DEFINE_IDA(iommu_global_pasid_ida);
/* Allocate a PASID for the mm within range (inclusive) */
-static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
+static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
{
+ ioasid_t pasid;
int ret = 0;
- if (min == IOMMU_PASID_INVALID ||
- max == IOMMU_PASID_INVALID ||
- min == 0 || max < min)
- return -EINVAL;
-
if (!arch_pgtable_dma_compat(mm))
return -EBUSY;
mutex_lock(&iommu_sva_lock);
/* Is a PASID already associated with this mm? */
if (mm_valid_pasid(mm)) {
- if (mm->pasid < min || mm->pasid > max)
+ if (mm->pasid >= dev->iommu->max_pasids)
ret = -EOVERFLOW;
goto out;
}
- ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL);
- if (ret < 0)
+ pasid = iommu_alloc_global_pasid(dev);
+ if (pasid == IOMMU_PASID_INVALID) {
+ ret = -ENOSPC;
goto out;
-
- mm->pasid = ret;
+ }
+ mm->pasid = pasid;
ret = 0;
out:
mutex_unlock(&iommu_sva_lock);
@@ -64,15 +60,10 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
{
struct iommu_domain *domain;
struct iommu_sva *handle;
- ioasid_t max_pasids;
int ret;
- max_pasids = dev->iommu->max_pasids;
- if (!max_pasids)
- return ERR_PTR(-EOPNOTSUPP);
-
/* Allocate mm->pasid if necessary. */
- ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1);
+ ret = iommu_sva_alloc_pasid(mm, dev);
if (ret)
return ERR_PTR(ret);
@@ -217,5 +208,5 @@ void mm_pasid_drop(struct mm_struct *mm)
if (likely(!mm_valid_pasid(mm)))
return;
- ida_free(&iommu_global_pasid_ida, mm->pasid);
+ iommu_free_global_pasid(mm->pasid);
}
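
iommu_sva_alloc_pasid() now allocates the mm's PASID at most once, reuses it on later binds, and only rejects a bind when the existing PASID does not fit under the binding device's max_pasids. A small sketch of that allocate-once-then-validate shape; the counter, limits and error codes here are stand-ins, not the real iommu_alloc_global_pasid() behaviour:

#include <stdio.h>

#define PASID_INVALID   (~0U)

static unsigned int next_pasid = 1;     /* stand-in global allocator, 0 reserved */

struct mm  { unsigned int pasid; };
struct dev { unsigned int max_pasids; };

static int sva_alloc_pasid(struct mm *mm, const struct dev *dev)
{
        if (mm->pasid != PASID_INVALID) /* already allocated: just validate */
                return mm->pasid < dev->max_pasids ? 0 : -1;

        if (next_pasid >= dev->max_pasids)
                return -1;              /* allocator exhausted for this device */

        mm->pasid = next_pasid++;
        return 0;
}

int main(void)
{
        struct mm mm = { .pasid = PASID_INVALID };
        struct dev wide = { .max_pasids = 1 << 20 };
        struct dev narrow = { .max_pasids = 1 };

        printf("first bind: %d (pasid %u)\n", sva_alloc_pasid(&mm, &wide), mm.pasid);
        printf("rebind:     %d (pasid %u, unchanged)\n", sva_alloc_pasid(&mm, &wide), mm.pasid);
        printf("narrow dev: %d (pasid %u does not fit)\n", sva_alloc_pasid(&mm, &narrow), mm.pasid);
        return 0;
}
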
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index 99869217fbec..cbe378c34ba3 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -107,9 +107,6 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
{
int ret;
- if (!iommu || IS_ERR(iommu))
- return -ENODEV;
-
ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
&link->kobj, dev_name(link));
if (ret)
@@ -122,14 +119,9 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
return ret;
}
-EXPORT_SYMBOL_GPL(iommu_device_link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
{
- if (!iommu || IS_ERR(iommu))
- return;
-
sysfs_remove_link(&link->kobj, "iommu");
sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
}
-EXPORT_SYMBOL_GPL(iommu_device_unlink);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index c9c370de9d3d..3bfc56df4f78 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -41,6 +41,7 @@
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
+static DEFINE_IDA(iommu_global_pasid_ida);
static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
@@ -129,9 +130,12 @@ static int iommu_setup_default_domain(struct iommu_group *group,
int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
struct device *dev);
-static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
const char *buf, size_t count);
+static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
+ struct device *dev);
+static void __iommu_group_free_device(struct iommu_group *group,
+ struct group_device *grp_dev);
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
@@ -377,28 +381,18 @@ static u32 dev_iommu_get_max_pasids(struct device *dev)
return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}
-static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
+/*
+ * Init the dev->iommu and dev->iommu_group in the struct device and get the
+ * driver probed
+ */
+static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_device *iommu_dev;
struct iommu_group *group;
- static DEFINE_MUTEX(iommu_probe_device_lock);
int ret;
- if (!ops)
- return -ENODEV;
- /*
- * Serialise to avoid races between IOMMU drivers registering in
- * parallel and/or the "replay" calls from ACPI/OF code via client
- * driver probe. Once the latter have been cleaned up we should
- * probably be able to use device_lock() here to minimise the scope,
- * but for now enforcing a simple global ordering is fine.
- */
- mutex_lock(&iommu_probe_device_lock);
- if (!dev_iommu_get(dev)) {
- ret = -ENOMEM;
- goto err_unlock;
- }
+ if (!dev_iommu_get(dev))
+ return -ENOMEM;
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
@@ -408,124 +402,184 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
iommu_dev = ops->probe_device(dev);
if (IS_ERR(iommu_dev)) {
ret = PTR_ERR(iommu_dev);
- goto out_module_put;
+ goto err_module_put;
}
- dev->iommu->iommu_dev = iommu_dev;
- dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
- if (ops->is_attach_deferred)
- dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
+ ret = iommu_device_link(iommu_dev, dev);
+ if (ret)
+ goto err_release;
- group = iommu_group_get_for_dev(dev);
+ group = ops->device_group(dev);
+ if (WARN_ON_ONCE(group == NULL))
+ group = ERR_PTR(-EINVAL);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
- goto out_release;
+ goto err_unlink;
}
+ dev->iommu_group = group;
- mutex_lock(&group->mutex);
- if (group_list && !group->default_domain && list_empty(&group->entry))
- list_add_tail(&group->entry, group_list);
- mutex_unlock(&group->mutex);
- iommu_group_put(group);
-
- mutex_unlock(&iommu_probe_device_lock);
- iommu_device_link(iommu_dev, dev);
-
+ dev->iommu->iommu_dev = iommu_dev;
+ dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
+ if (ops->is_attach_deferred)
+ dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
return 0;
-out_release:
+err_unlink:
+ iommu_device_unlink(iommu_dev, dev);
+err_release:
if (ops->release_device)
ops->release_device(dev);
-
-out_module_put:
+err_module_put:
module_put(ops->owner);
-
err_free:
dev_iommu_free(dev);
+ return ret;
+}
-err_unlock:
- mutex_unlock(&iommu_probe_device_lock);
+static void iommu_deinit_device(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- return ret;
+ lockdep_assert_held(&group->mutex);
+
+ iommu_device_unlink(dev->iommu->iommu_dev, dev);
+
+ /*
+ * release_device() must stop using any attached domain on the device.
+ * If there are still other devices in the group they are not affected
+ * by this callback.
+ *
+ * The IOMMU driver must set the device to either an identity or
+ * blocking translation and stop using any domain pointer, as it is
+ * going to be freed.
+ */
+ if (ops->release_device)
+ ops->release_device(dev);
+
+ /*
+ * If this is the last driver to use the group then we must free the
+ * domains before we do the module_put().
+ */
+ if (list_empty(&group->devices)) {
+ if (group->default_domain) {
+ iommu_domain_free(group->default_domain);
+ group->default_domain = NULL;
+ }
+ if (group->blocking_domain) {
+ iommu_domain_free(group->blocking_domain);
+ group->blocking_domain = NULL;
+ }
+ group->domain = NULL;
+ }
+
+ /* Caller must put iommu_group */
+ dev->iommu_group = NULL;
+ module_put(ops->owner);
+ dev_iommu_free(dev);
}
-int iommu_probe_device(struct device *dev)
+static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
- const struct iommu_ops *ops;
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
+ static DEFINE_MUTEX(iommu_probe_device_lock);
+ struct group_device *gdev;
int ret;
- ret = __iommu_probe_device(dev, NULL);
- if (ret)
- goto err_out;
+ if (!ops)
+ return -ENODEV;
+ /*
+ * Serialise to avoid races between IOMMU drivers registering in
+ * parallel and/or the "replay" calls from ACPI/OF code via client
+ * driver probe. Once the latter have been cleaned up we should
+ * probably be able to use device_lock() here to minimise the scope,
+ * but for now enforcing a simple global ordering is fine.
+ */
+ mutex_lock(&iommu_probe_device_lock);
- group = iommu_group_get(dev);
- if (!group) {
- ret = -ENODEV;
- goto err_release;
+ /* Device is probed already if in a group */
+ if (dev->iommu_group) {
+ ret = 0;
+ goto out_unlock;
}
+ ret = iommu_init_device(dev, ops);
+ if (ret)
+ goto out_unlock;
+
+ group = dev->iommu_group;
+ gdev = iommu_group_alloc_device(group, dev);
mutex_lock(&group->mutex);
+ if (IS_ERR(gdev)) {
+ ret = PTR_ERR(gdev);
+ goto err_put_group;
+ }
+ /*
+ * The gdev must be in the list before calling
+ * iommu_setup_default_domain()
+ */
+ list_add_tail(&gdev->list, &group->devices);
+ WARN_ON(group->default_domain && !group->domain);
if (group->default_domain)
iommu_create_device_direct_mappings(group->default_domain, dev);
-
if (group->domain) {
ret = __iommu_device_set_domain(group, dev, group->domain, 0);
if (ret)
- goto err_unlock;
- } else if (!group->default_domain) {
+ goto err_remove_gdev;
+ } else if (!group->default_domain && !group_list) {
ret = iommu_setup_default_domain(group, 0);
if (ret)
- goto err_unlock;
+ goto err_remove_gdev;
+ } else if (!group->default_domain) {
+ /*
+ * With a group_list argument we defer the default_domain setup
+ * to the caller by providing a de-duplicated list of groups
+ * that need further setup.
+ */
+ if (list_empty(&group->entry))
+ list_add_tail(&group->entry, group_list);
}
-
mutex_unlock(&group->mutex);
- iommu_group_put(group);
+ mutex_unlock(&iommu_probe_device_lock);
- ops = dev_iommu_ops(dev);
- if (ops->probe_finalize)
- ops->probe_finalize(dev);
+ if (dev_is_pci(dev))
+ iommu_dma_set_pci_32bit_workaround(dev);
return 0;
-err_unlock:
+err_remove_gdev:
+ list_del(&gdev->list);
+ __iommu_group_free_device(group, gdev);
+err_put_group:
+ iommu_deinit_device(dev);
mutex_unlock(&group->mutex);
iommu_group_put(group);
-err_release:
- iommu_release_device(dev);
+out_unlock:
+ mutex_unlock(&iommu_probe_device_lock);
-err_out:
return ret;
-
}
-/*
- * Remove a device from a group's device list and return the group device
- * if successful.
- */
-static struct group_device *
-__iommu_group_remove_device(struct iommu_group *group, struct device *dev)
+int iommu_probe_device(struct device *dev)
{
- struct group_device *device;
+ const struct iommu_ops *ops;
+ int ret;
- lockdep_assert_held(&group->mutex);
- for_each_group_device(group, device) {
- if (device->dev == dev) {
- list_del(&device->list);
- return device;
- }
- }
+ ret = __iommu_probe_device(dev, NULL);
+ if (ret)
+ return ret;
- return NULL;
+ ops = dev_iommu_ops(dev);
+ if (ops->probe_finalize)
+ ops->probe_finalize(dev);
+
+ return 0;
}
-/*
- * Release a device from its group and decrements the iommu group reference
- * count.
- */
-static void __iommu_group_release_device(struct iommu_group *group,
- struct group_device *grp_dev)
+static void __iommu_group_free_device(struct iommu_group *group,
+ struct group_device *grp_dev)
{
struct device *dev = grp_dev->dev;
@@ -534,54 +588,57 @@ static void __iommu_group_release_device(struct iommu_group *group,
trace_remove_device_from_group(group->id, dev);
+ /*
+ * If the group has become empty then ownership must have been
+ * released, and the current domain must be set back to NULL or
+ * the default domain.
+ */
+ if (list_empty(&group->devices))
+ WARN_ON(group->owner_cnt ||
+ group->domain != group->default_domain);
+
kfree(grp_dev->name);
kfree(grp_dev);
- dev->iommu_group = NULL;
- kobject_put(group->devices_kobj);
}
-static void iommu_release_device(struct device *dev)
+/* Remove the iommu_group from the struct device. */
+static void __iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
struct group_device *device;
- const struct iommu_ops *ops;
-
- if (!dev->iommu || !group)
- return;
-
- iommu_device_unlink(dev->iommu->iommu_dev, dev);
mutex_lock(&group->mutex);
- device = __iommu_group_remove_device(group, dev);
+ for_each_group_device(group, device) {
+ if (device->dev != dev)
+ continue;
- /*
- * If the group has become empty then ownership must have been released,
- * and the current domain must be set back to NULL or the default
- * domain.
- */
- if (list_empty(&group->devices))
- WARN_ON(group->owner_cnt ||
- group->domain != group->default_domain);
+ list_del(&device->list);
+ __iommu_group_free_device(group, device);
+ if (dev->iommu && dev->iommu->iommu_dev)
+ iommu_deinit_device(dev);
+ else
+ dev->iommu_group = NULL;
+ break;
+ }
+ mutex_unlock(&group->mutex);
/*
- * release_device() must stop using any attached domain on the device.
- * If there are still other devices in the group they are not effected
- * by this callback.
- *
- * The IOMMU driver must set the device to either an identity or
- * blocking translation and stop using any domain pointer, as it is
- * going to be freed.
+ * Pairs with the get in iommu_init_device() or
+ * iommu_group_add_device()
*/
- ops = dev_iommu_ops(dev);
- if (ops->release_device)
- ops->release_device(dev);
- mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+}
- if (device)
- __iommu_group_release_device(group, device);
+static void iommu_release_device(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
- module_put(ops->owner);
- dev_iommu_free(dev);
+ if (group)
+ __iommu_group_remove_device(dev);
+
+ /* Free any fwspec if no iommu_driver was ever attached */
+ if (dev->iommu)
+ dev_iommu_free(dev);
}
static int __init iommu_set_def_domain_type(char *str)
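
The probe-path rework pairs iommu_init_device() with iommu_deinit_device() so that every resource taken while probing (dev_iommu, module reference, sysfs link, group membership) has exactly one release point, and iommu_release_device() simply walks the same steps back. A minimal standalone sketch of that constructor/destructor pairing, with invented names and a trivial group in place of the real structures:

#include <stdio.h>

struct group { int refs; };

struct obj {
        int linked;             /* e.g. the sysfs link */
        struct group *grp;      /* e.g. iommu group membership */
};

static struct group the_group;

static int obj_init(struct obj *o)
{
        o->linked = 1;
        o->grp = &the_group;
        o->grp->refs++;
        return 0;
}

static void obj_deinit(struct obj *o)
{
        /* Undo obj_init() in reverse order, nothing else. */
        o->grp->refs--;
        o->grp = NULL;
        o->linked = 0;
}

static int probe(struct obj *o)
{
        /* Further setup would follow here; on failure it would call
         * obj_deinit() before returning, mirroring err_put_group. */
        return obj_init(o);
}

static void release(struct obj *o)
{
        obj_deinit(o);
}

int main(void)
{
        struct obj o = { 0 };

        probe(&o);
        printf("after probe:   linked=%d group refs=%d\n", o.linked, the_group.refs);
        release(&o);
        printf("after release: linked=%d group refs=%d\n", o.linked, the_group.refs);
        return 0;
}
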
@@ -842,10 +899,9 @@ static void iommu_group_release(struct kobject *kobj)
ida_free(&iommu_group_ida, group->id);
- if (group->default_domain)
- iommu_domain_free(group->default_domain);
- if (group->blocking_domain)
- iommu_domain_free(group->blocking_domain);
+ /* Domains are free'd by iommu_deinit_device() */
+ WARN_ON(group->default_domain);
+ WARN_ON(group->blocking_domain);
kfree(group->name);
kfree(group);
@@ -1003,14 +1059,12 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
unsigned long pg_size;
int ret = 0;
- if (!iommu_is_dma_domain(domain))
- return 0;
-
- BUG_ON(!domain->pgsize_bitmap);
-
- pg_size = 1UL << __ffs(domain->pgsize_bitmap);
+ pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0;
INIT_LIST_HEAD(&mappings);
+ if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size))
+ return -EINVAL;
+
iommu_get_resv_regions(dev, &mappings);
/* We need to consider overlapping regions for different devices */
@@ -1018,13 +1072,17 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
dma_addr_t start, end, addr;
size_t map_size = 0;
- start = ALIGN(entry->start, pg_size);
- end = ALIGN(entry->start + entry->length, pg_size);
+ if (entry->type == IOMMU_RESV_DIRECT)
+ dev->iommu->require_direct = 1;
- if (entry->type != IOMMU_RESV_DIRECT &&
- entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
+ if ((entry->type != IOMMU_RESV_DIRECT &&
+ entry->type != IOMMU_RESV_DIRECT_RELAXABLE) ||
+ !iommu_is_dma_domain(domain))
continue;
+ start = ALIGN(entry->start, pg_size);
+ end = ALIGN(entry->start + entry->length, pg_size);
+
for (addr = start; addr <= end; addr += pg_size) {
phys_addr_t phys_addr;
@@ -1058,22 +1116,16 @@ out:
return ret;
}
-/**
- * iommu_group_add_device - add a device to an iommu group
- * @group: the group into which to add the device (reference should be held)
- * @dev: the device
- *
- * This function is called by an iommu driver to add a device into a
- * group. Adding a device increments the group reference count.
- */
-int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+/* This is undone by __iommu_group_free_device() */
+static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
+ struct device *dev)
{
int ret, i = 0;
struct group_device *device;
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
device->dev = dev;
@@ -1104,18 +1156,11 @@ rename:
goto err_free_name;
}
- kobject_get(group->devices_kobj);
-
- dev->iommu_group = group;
-
- mutex_lock(&group->mutex);
- list_add_tail(&device->list, &group->devices);
- mutex_unlock(&group->mutex);
trace_add_device_to_group(group->id, dev);
dev_info(dev, "Adding to iommu group %d\n", group->id);
- return 0;
+ return device;
err_free_name:
kfree(device->name);
@@ -1124,7 +1169,32 @@ err_remove_link:
err_free_device:
kfree(device);
dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
- return ret;
+ return ERR_PTR(ret);
+}
+
+/**
+ * iommu_group_add_device - add a device to an iommu group
+ * @group: the group into which to add the device (reference should be held)
+ * @dev: the device
+ *
+ * This function is called by an iommu driver to add a device into a
+ * group. Adding a device increments the group reference count.
+ */
+int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+{
+ struct group_device *gdev;
+
+ gdev = iommu_group_alloc_device(group, dev);
+ if (IS_ERR(gdev))
+ return PTR_ERR(gdev);
+
+ iommu_group_ref_get(group);
+ dev->iommu_group = group;
+
+ mutex_lock(&group->mutex);
+ list_add_tail(&gdev->list, &group->devices);
+ mutex_unlock(&group->mutex);
+ return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -1138,19 +1208,13 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
void iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
- struct group_device *device;
if (!group)
return;
dev_info(dev, "Removing from iommu group %d\n", group->id);
- mutex_lock(&group->mutex);
- device = __iommu_group_remove_device(group, dev);
- mutex_unlock(&group->mutex);
-
- if (device)
- __iommu_group_release_device(group, device);
+ __iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
@@ -1708,45 +1772,6 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
return dom;
}
-/**
- * iommu_group_get_for_dev - Find or create the IOMMU group for a device
- * @dev: target device
- *
- * This function is intended to be called by IOMMU drivers and extended to
- * support common, bus-defined algorithms when determining or creating the
- * IOMMU group for a device. On success, the caller will hold a reference
- * to the returned IOMMU group, which will already include the provided
- * device. The reference should be released with iommu_group_put().
- */
-static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
- struct iommu_group *group;
- int ret;
-
- group = iommu_group_get(dev);
- if (group)
- return group;
-
- group = ops->device_group(dev);
- if (WARN_ON_ONCE(group == NULL))
- return ERR_PTR(-EINVAL);
-
- if (IS_ERR(group))
- return group;
-
- ret = iommu_group_add_device(group, dev);
- if (ret)
- goto out_put_group;
-
- return group;
-
-out_put_group:
- iommu_group_put(group);
-
- return ERR_PTR(ret);
-}
-
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
return group->default_domain;
@@ -1755,16 +1780,8 @@ struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
static int probe_iommu_group(struct device *dev, void *data)
{
struct list_head *group_list = data;
- struct iommu_group *group;
int ret;
- /* Device is probed already if in a group */
- group = iommu_group_get(dev);
- if (group) {
- iommu_group_put(group);
- return 0;
- }
-
ret = __iommu_probe_device(dev, group_list);
if (ret == -ENODEV)
ret = 0;
@@ -1840,11 +1857,6 @@ int bus_iommu_probe(const struct bus_type *bus)
LIST_HEAD(group_list);
int ret;
- /*
- * This code-path does not allocate the default domain when
- * creating the iommu group, so do it after the groups are
- * created.
- */
ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
if (ret)
return ret;
@@ -1857,6 +1869,11 @@ int bus_iommu_probe(const struct bus_type *bus)
/* Remove item from the list */
list_del_init(&group->entry);
+ /*
+ * We go to the trouble of deferred default domain creation so
+ * that the cross-group default domain type and the setup of the
+ * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
+ */
ret = iommu_setup_default_domain(group, 0);
if (ret) {
mutex_unlock(&group->mutex);
@@ -2191,6 +2208,21 @@ static int __iommu_device_set_domain(struct iommu_group *group,
{
int ret;
+ /*
+ * If the device requires IOMMU_RESV_DIRECT then we cannot allow
+ * the blocking domain to be attached as it does not contain the
+ * required 1:1 mapping. This test effectively excludes the device
+ * being used with iommu_group_claim_dma_owner() which will block
+ * vfio and iommufd as well.
+ */
+ if (dev->iommu->require_direct &&
+ (new_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ new_domain == group->blocking_domain)) {
+ dev_warn(dev,
+ "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n");
+ return -EINVAL;
+ }
+
if (dev->iommu->attach_deferred) {
if (new_domain == group->default_domain)
return 0;
@@ -3282,7 +3314,7 @@ static void __iommu_release_dma_ownership(struct iommu_group *group)
/**
* iommu_group_release_dma_owner() - Release DMA ownership of a group
- * @dev: The device
+ * @group: The group
*
* Release the DMA ownership claimed by iommu_group_claim_dma_owner().
*/
@@ -3296,7 +3328,7 @@ EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
/**
* iommu_device_release_dma_owner() - Release DMA ownership of a device
- * @group: The device.
+ * @dev: The device.
*
* Release the DMA ownership claimed by iommu_device_claim_dma_owner().
*/
@@ -3479,3 +3511,30 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
return domain;
}
+
+ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+ int ret;
+
+ /* max_pasids == 0 means that the device does not support PASID */
+ if (!dev->iommu->max_pasids)
+ return IOMMU_PASID_INVALID;
+
+ /*
+ * max_pasids is set up by the vendor driver based on the number of PASID
+ * bits supported, but the IDA allocation is inclusive.
+ */
+ ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID,
+ dev->iommu->max_pasids - 1, GFP_KERNEL);
+ return ret < 0 ? IOMMU_PASID_INVALID : ret;
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);
+
+void iommu_free_global_pasid(ioasid_t pasid)
+{
+ if (WARN_ON(pasid == IOMMU_PASID_INVALID))
+ return;
+
+ ida_free(&iommu_global_pasid_ida, pasid);
+}
+EXPORT_SYMBOL_GPL(iommu_free_global_pasid);
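For reference, a minimal sketch of how a driver might consume the global PASID
helpers added above. Only iommu_alloc_global_pasid(), iommu_free_global_pasid()
and IOMMU_PASID_INVALID come from this patch; the example_* functions and the
-ENOSPC error choice are hypothetical.

#include <linux/iommu.h>	/* iommu_alloc_global_pasid(), ioasid_t */

/* Hypothetical consumer of the new global PASID allocator (sketch only). */
static int example_enable_pasid(struct device *dev, ioasid_t *out_pasid)
{
	ioasid_t pasid;

	/*
	 * iommu_alloc_global_pasid() returns IOMMU_PASID_INVALID when the
	 * device has no PASID support (max_pasids == 0) or the IDA range is
	 * exhausted.
	 */
	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	*out_pasid = pasid;
	return 0;
}

static void example_disable_pasid(ioasid_t pasid)
{
	/* Return the PASID to the global IDA. */
	iommu_free_global_pasid(pasid);
}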
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 9f64c5c9f5b9..65ff69477c43 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -14,11 +14,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -253,17 +254,13 @@ static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
- unsigned int count = 0;
+ u32 val;
- while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
- cpu_relax();
- if (++count == TLB_LOOP_TIMEOUT) {
- dev_err_ratelimited(domain->mmu->dev,
+ if (read_poll_timeout_atomic(ipmmu_ctx_read_root, val,
+ !(val & IMCTR_FLUSH), 1, TLB_LOOP_TIMEOUT,
+ false, domain, IMCTR))
+ dev_err_ratelimited(domain->mmu->dev,
"TLB sync timed out -- MMU may be deadlocked\n");
- return;
- }
- udelay(1);
- }
}
static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
@@ -723,6 +720,10 @@ static bool ipmmu_device_is_allowed(struct device *dev)
if (soc_device_match(soc_denylist))
return false;
+ /* Check whether this device is a PCI device */
+ if (dev_is_pci(dev))
+ return true;
+
/* Check whether this device can work with the IPMMU */
for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
if (!strcmp(dev_name(dev), devices_allowlist[i]))
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index e93906d6e112..fab6c347ce57 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
+#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
@@ -27,6 +28,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -143,6 +145,7 @@
#define PGTABLE_PA_35_EN BIT(17)
#define TF_PORT_TO_ADDR_MT8173 BIT(18)
#define INT_ID_PORT_WIDTH_6 BIT(19)
+#define CFG_IFA_MASTER_IN_ATF BIT(20)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -167,6 +170,7 @@ enum mtk_iommu_plat {
M4U_MT8173,
M4U_MT8183,
M4U_MT8186,
+ M4U_MT8188,
M4U_MT8192,
M4U_MT8195,
M4U_MT8365,
@@ -258,6 +262,8 @@ struct mtk_iommu_data {
struct device *smicomm_dev;
struct mtk_iommu_bank_data *bank;
+ struct mtk_iommu_domain *share_dom;
+
struct regmap *pericfg;
struct mutex mutex; /* Protect m4u_group/m4u_dom above */
@@ -577,41 +583,55 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
unsigned int larbid, portid;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
const struct mtk_iommu_iova_region *region;
- u32 peri_mmuen, peri_mmuen_msk;
+ unsigned long portid_msk = 0;
+ struct arm_smccc_res res;
int i, ret = 0;
for (i = 0; i < fwspec->num_ids; ++i) {
- larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
+ portid_msk |= BIT(portid);
+ }
- if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
- larb_mmu = &data->larb_imu[larbid];
+ if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+ /* All ports should be in the same larb. Just use 0 here */
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ larb_mmu = &data->larb_imu[larbid];
+ region = data->plat_data->iova_region + regionid;
- region = data->plat_data->iova_region + regionid;
+ for_each_set_bit(portid, &portid_msk, 32)
larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
- dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n",
- enable ? "enable" : "disable", dev_name(larb_mmu->dev),
- portid, regionid, larb_mmu->bank[portid]);
+ dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n",
+ enable ? "enable" : "disable", dev_name(larb_mmu->dev),
+ portid_msk, regionid, upper_32_bits(region->iova_base));
- if (enable)
- larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
- else
- larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
- peri_mmuen_msk = BIT(portid);
+ if (enable)
+ larb_mmu->mmu |= portid_msk;
+ else
+ larb_mmu->mmu &= ~portid_msk;
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
+ arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL,
+ IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU,
+ portid_msk, enable, 0, 0, 0, 0, &res);
+ ret = res.a0;
+ } else {
/* PCI dev has only one output id, enable the next writing bit for PCIe */
- if (dev_is_pci(dev))
- peri_mmuen_msk |= BIT(portid + 1);
+ if (dev_is_pci(dev)) {
+ if (fwspec->num_ids != 1) {
+ dev_err(dev, "PCI dev can only have one port.\n");
+ return -ENODEV;
+ }
+ portid_msk |= BIT(portid + 1);
+ }
- peri_mmuen = enable ? peri_mmuen_msk : 0;
ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1,
- peri_mmuen_msk, peri_mmuen);
- if (ret)
- dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n",
- enable ? "enable" : "disable",
- dev_name(data->dev), peri_mmuen_msk, ret);
+ (u32)portid_msk, enable ? (u32)portid_msk : 0);
}
+ if (ret)
+ dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n",
+ enable ? "enable" : "disable",
+ dev_name(data->dev), portid_msk, ret);
}
return ret;
}
@@ -620,15 +640,14 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
struct mtk_iommu_data *data,
unsigned int region_id)
{
+ struct mtk_iommu_domain *share_dom = data->share_dom;
const struct mtk_iommu_iova_region *region;
- struct mtk_iommu_domain *m4u_dom;
-
- /* Always use bank0 in sharing pgtable case */
- m4u_dom = data->bank[0].m4u_dom;
- if (m4u_dom) {
- dom->iop = m4u_dom->iop;
- dom->cfg = m4u_dom->cfg;
- dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap;
+
+ /* Share pgtable when 2 MM IOMMUs share the pgtable or one IOMMU uses multiple iova ranges */
+ if (share_dom) {
+ dom->iop = share_dom->iop;
+ dom->cfg = share_dom->cfg;
+ dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
goto update_iova_region;
}
@@ -658,6 +677,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
/* Update our support page sizes bitmap */
dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+ data->share_dom = dom;
+
update_iova_region:
/* Update the iova region for this domain */
region = data->plat_data->iova_region + region_id;
@@ -708,7 +729,9 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
/* Data is in the frstdata in sharing pgtable case. */
frstdata = mtk_iommu_get_frst_data(hw_list);
+ mutex_lock(&frstdata->mutex);
ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
+ mutex_unlock(&frstdata->mutex);
if (ret) {
mutex_unlock(&dom->mutex);
return ret;
@@ -1318,7 +1341,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
dev_err_probe(dev, ret, "mm dts parse fail\n");
goto out_runtime_disable;
}
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
+ !MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
p = data->plat_data->pericfg_comp_str;
data->pericfg = syscon_regmap_lookup_by_compatible(p);
if (IS_ERR(data->pericfg)) {
@@ -1570,6 +1594,67 @@ static const struct mtk_iommu_plat_data mt8186_data_mm = {
.iova_region_larb_msk = mt8186_larb_region_msk,
};
+static const struct mtk_iommu_plat_data mt8188_data_infra = {
+ .m4u_plat = M4U_MT8188,
+ .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO |
+ MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT |
+ PGTABLE_PA_35_EN | CFG_IFA_MASTER_IN_ATF,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
+};
+
+static const u32 mt8188_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0, ~0, ~0}, /* Region0: all ports for larb0/1/2/3 */
+ [1] = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, ~0, ~0, ~0}, /* Region1: larb19(21)/21(22)/23 */
+ [2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0, /* Region2: the other larbs. */
+ ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
+ ~0, ~0, ~0, ~0, ~0, 0, 0, 0,
+ 0, ~0},
+ [3] = {0},
+ [4] = {[24] = BIT(0) | BIT(1)}, /* Only larb27(24) port0/1 */
+ [5] = {[24] = BIT(2) | BIT(3)}, /* Only larb27(24) port2/3 */
+};
+
+static const struct mtk_iommu_plat_data mt8188_data_vdo = {
+ .m4u_plat = M4U_MT8188,
+ .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE |
+ PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM,
+ .hw_list = &m4ulist,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8188_larb_region_msk,
+ .larbid_remap = {{2}, {0}, {21}, {0}, {19}, {9, 10,
+ 11 /* 11a */, 25 /* 11c */},
+ {13, 0, 29 /* 16b */, 30 /* 17b */, 0}, {5}},
+};
+
+static const struct mtk_iommu_plat_data mt8188_data_vpp = {
+ .m4u_plat = M4U_MT8188,
+ .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE |
+ PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM,
+ .hw_list = &m4ulist,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8188_larb_region_msk,
+ .larbid_remap = {{1}, {3}, {23}, {7}, {MTK_INVALID_LARBID},
+ {12, 15, 24 /* 11b */}, {14, MTK_INVALID_LARBID,
+ 16 /* 16a */, 17 /* 17a */, MTK_INVALID_LARBID,
+ 27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}},
+};
+
static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
[0] = {~0, ~0}, /* Region0: larb0/1 */
[1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
@@ -1678,6 +1763,9 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
{ .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */
+ { .compatible = "mediatek,mt8188-iommu-infra", .data = &mt8188_data_infra},
+ { .compatible = "mediatek,mt8188-iommu-vdo", .data = &mt8188_data_vdo},
+ { .compatible = "mediatek,mt8188-iommu-vpp", .data = &mt8188_data_vpp},
{ .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
{ .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra},
{ .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo},
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 40f57d293a79..157b286e36bf 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -159,7 +159,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
* If we have reason to believe the IOMMU driver missed the initial
* probe for dev, replay it to get things in order.
*/
- if (!err && dev->bus && !device_iommu_mapped(dev))
+ if (!err && dev->bus)
err = iommu_probe_device(dev);
/* Ignore all other errors apart from EPROBE_DEFER */
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4054030c3237..8ff69fbf9f65 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -98,9 +98,8 @@ struct rk_iommu_ops {
phys_addr_t (*pt_address)(u32 dte);
u32 (*mk_dtentries)(dma_addr_t pt_dma);
u32 (*mk_ptentries)(phys_addr_t page, int prot);
- phys_addr_t (*dte_addr_phys)(u32 addr);
- u32 (*dma_addr_dte)(dma_addr_t dt_dma);
u64 dma_bit_mask;
+ gfp_t gfp_flags;
};
struct rk_iommu {
@@ -278,8 +277,8 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
/*
* In v2:
* 31:12 - Page address bit 31:0
- * 11:9 - Page address bit 34:32
- * 8:4 - Page address bit 39:35
+ * 11: 8 - Page address bit 35:32
+ * 7: 4 - Page address bit 39:36
* 3 - Security
* 2 - Writable
* 1 - Readable
@@ -506,7 +505,7 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
/*
* Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
- * and verifying that upper 5 nybbles are read back.
+ * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back.
*/
for (i = 0; i < iommu->num_mmu; i++) {
dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
@@ -531,33 +530,6 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
return 0;
}
-static inline phys_addr_t rk_dte_addr_phys(u32 addr)
-{
- return (phys_addr_t)addr;
-}
-
-static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
-{
- return dt_dma;
-}
-
-#define DT_HI_MASK GENMASK_ULL(39, 32)
-#define DTE_BASE_HI_MASK GENMASK(11, 4)
-#define DT_SHIFT 28
-
-static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
-{
- u64 addr64 = addr;
- return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
- ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
-}
-
-static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
-{
- return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
- ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
-}
-
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
void __iomem *base = iommu->bases[index];
@@ -577,7 +549,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
page_offset = rk_iova_page_offset(iova);
mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
- mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
+ mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr);
dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
dte_addr = phys_to_virt(dte_addr_phys);
@@ -756,7 +728,7 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (rk_dte_is_pt_valid(dte))
goto done;
- page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
if (!page_table)
return ERR_PTR(-ENOMEM);
@@ -967,7 +939,7 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
- rk_ops->dma_addr_dte(rk_domain->dt_dma));
+ rk_ops->mk_dtentries(rk_domain->dt_dma));
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
@@ -1105,7 +1077,7 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
- rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+ rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
if (!rk_domain->dt)
goto err_free_domain;
@@ -1405,18 +1377,16 @@ static struct rk_iommu_ops iommu_data_ops_v1 = {
.pt_address = &rk_dte_pt_address,
.mk_dtentries = &rk_mk_dte,
.mk_ptentries = &rk_mk_pte,
- .dte_addr_phys = &rk_dte_addr_phys,
- .dma_addr_dte = &rk_dma_addr_dte,
.dma_bit_mask = DMA_BIT_MASK(32),
+ .gfp_flags = GFP_DMA32,
};
static struct rk_iommu_ops iommu_data_ops_v2 = {
.pt_address = &rk_dte_pt_address_v2,
.mk_dtentries = &rk_mk_dte_v2,
.mk_ptentries = &rk_mk_pte_v2,
- .dte_addr_phys = &rk_dte_addr_phys_v2,
- .dma_addr_dte = &rk_dma_addr_dte_v2,
.dma_bit_mask = DMA_BIT_MASK(40),
+ .gfp_flags = 0,
};
static const struct of_device_id rk_iommu_dt_ids[] = {
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 39e34fdeccda..2fa9afebd4f5 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -14,6 +14,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -148,6 +149,7 @@ static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
dom->domain.geometry.aperture_start = 0;
dom->domain.geometry.aperture_end = SZ_256M - 1;
+ dom->domain.geometry.force_aperture = true;
return &dom->domain;
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 1cbf063ccf14..e445f80d0226 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -9,7 +9,7 @@
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 3551ed057774..17dcd826f5c2 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -13,7 +13,7 @@
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 3db4592cda1c..f407cce9ecaa 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -29,4 +29,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void gic_enable_of_quirks(const struct device_node *np,
const struct gic_quirk *quirks, void *data);
+#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
+#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
+#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2)
+
#endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e0c2b10d154d..75a2dd550625 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -44,10 +44,6 @@
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3)
-#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
-#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
-#define RDIST_FLAGS_FORCE_NON_SHAREABLE (1 << 2)
-
#define RD_LOCAL_LPI_ENABLED BIT(0)
#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
#define RD_LOCAL_MEMRESERVE_DONE BIT(2)
@@ -4754,6 +4750,14 @@ static bool __maybe_unused its_enable_rk3588001(void *data)
return true;
}
+static bool its_set_non_coherent(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
+ return true;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -4809,6 +4813,11 @@ static const struct gic_quirk its_quirks[] = {
},
#endif
{
+ .desc = "ITS: non-coherent attribute",
+ .property = "dma-noncoherent",
+ .init = its_set_non_coherent,
+ },
+ {
}
};
@@ -4817,6 +4826,10 @@ static void its_enable_quirks(struct its_node *its)
u32 iidr = readl_relaxed(its->base + GITS_IIDR);
gic_enable_quirks(iidr, its_quirks, its);
+
+ if (is_of_node(its->fwnode_handle))
+ gic_enable_of_quirks(to_of_node(its->fwnode_handle),
+ its_quirks, its);
}
static int its_save_disable(void)
@@ -4952,7 +4965,7 @@ out_unmap:
return NULL;
}
-static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
+static int its_init_domain(struct its_node *its)
{
struct irq_domain *inner_domain;
struct msi_domain_info *info;
@@ -4966,7 +4979,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
inner_domain = irq_domain_create_hierarchy(its_parent,
its->msi_domain_flags, 0,
- handle, &its_domain_ops,
+ its->fwnode_handle, &its_domain_ops,
info);
if (!inner_domain) {
kfree(info);
@@ -5017,8 +5030,7 @@ static int its_init_vpe_domain(void)
return 0;
}
-static int __init its_compute_its_list_map(struct resource *res,
- void __iomem *its_base)
+static int __init its_compute_its_list_map(struct its_node *its)
{
int its_number;
u32 ctlr;
@@ -5032,15 +5044,15 @@ static int __init its_compute_its_list_map(struct resource *res,
its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
if (its_number >= GICv4_ITS_LIST_MAX) {
pr_err("ITS@%pa: No ITSList entry available!\n",
- &res->start);
+ &its->phys_base);
return -EINVAL;
}
- ctlr = readl_relaxed(its_base + GITS_CTLR);
+ ctlr = readl_relaxed(its->base + GITS_CTLR);
ctlr &= ~GITS_CTLR_ITS_NUMBER;
ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
- writel_relaxed(ctlr, its_base + GITS_CTLR);
- ctlr = readl_relaxed(its_base + GITS_CTLR);
+ writel_relaxed(ctlr, its->base + GITS_CTLR);
+ ctlr = readl_relaxed(its->base + GITS_CTLR);
if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
its_number = ctlr & GITS_CTLR_ITS_NUMBER;
its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
@@ -5048,75 +5060,50 @@ static int __init its_compute_its_list_map(struct resource *res,
if (test_and_set_bit(its_number, &its_list_map)) {
pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
- &res->start, its_number);
+ &its->phys_base, its_number);
return -EINVAL;
}
return its_number;
}
-static int __init its_probe_one(struct resource *res,
- struct fwnode_handle *handle, int numa_node)
+static int __init its_probe_one(struct its_node *its)
{
- struct its_node *its;
- void __iomem *its_base;
- u64 baser, tmp, typer;
+ u64 baser, tmp;
struct page *page;
u32 ctlr;
int err;
- its_base = its_map_one(res, &err);
- if (!its_base)
- return err;
-
- pr_info("ITS %pR\n", res);
-
- its = kzalloc(sizeof(*its), GFP_KERNEL);
- if (!its) {
- err = -ENOMEM;
- goto out_unmap;
- }
-
- raw_spin_lock_init(&its->lock);
- mutex_init(&its->dev_alloc_lock);
- INIT_LIST_HEAD(&its->entry);
- INIT_LIST_HEAD(&its->its_device_list);
- typer = gic_read_typer(its_base + GITS_TYPER);
- its->typer = typer;
- its->base = its_base;
- its->phys_base = res->start;
if (is_v4(its)) {
- if (!(typer & GITS_TYPER_VMOVP)) {
- err = its_compute_its_list_map(res, its_base);
+ if (!(its->typer & GITS_TYPER_VMOVP)) {
+ err = its_compute_its_list_map(its);
if (err < 0)
- goto out_free_its;
+ goto out;
its->list_nr = err;
pr_info("ITS@%pa: Using ITS number %d\n",
- &res->start, err);
+ &its->phys_base, err);
} else {
- pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
+ pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
}
if (is_v4_1(its)) {
- u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+ u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
- its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
+ its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
if (!its->sgir_base) {
err = -ENOMEM;
- goto out_free_its;
+ goto out;
}
- its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
+ its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
- &res->start, its->mpidr, svpet);
+ &its->phys_base, its->mpidr, svpet);
}
}
- its->numa_node = numa_node;
-
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
@@ -5125,12 +5112,9 @@ static int __init its_probe_one(struct resource *res,
}
its->cmd_base = (void *)page_address(page);
its->cmd_write = its->cmd_base;
- its->fwnode_handle = handle;
its->get_msi_base = its_irq_get_msi_base;
its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
- its_enable_quirks(its);
-
err = its_alloc_tables(its);
if (err)
goto out_free_cmd;
@@ -5174,7 +5158,7 @@ static int __init its_probe_one(struct resource *res,
ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR);
- err = its_init_domain(handle, its);
+ err = its_init_domain(its);
if (err)
goto out_free_tables;
@@ -5191,11 +5175,8 @@ out_free_cmd:
out_unmap_sgir:
if (its->sgir_base)
iounmap(its->sgir_base);
-out_free_its:
- kfree(its);
-out_unmap:
- iounmap(its_base);
- pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
+out:
+ pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
return err;
}
@@ -5356,10 +5337,53 @@ static const struct of_device_id its_device_id[] = {
{},
};
+static struct its_node __init *its_node_init(struct resource *res,
+ struct fwnode_handle *handle, int numa_node)
+{
+ void __iomem *its_base;
+ struct its_node *its;
+ int err;
+
+ its_base = its_map_one(res, &err);
+ if (!its_base)
+ return NULL;
+
+ pr_info("ITS %pR\n", res);
+
+ its = kzalloc(sizeof(*its), GFP_KERNEL);
+ if (!its)
+ goto out_unmap;
+
+ raw_spin_lock_init(&its->lock);
+ mutex_init(&its->dev_alloc_lock);
+ INIT_LIST_HEAD(&its->entry);
+ INIT_LIST_HEAD(&its->its_device_list);
+
+ its->typer = gic_read_typer(its_base + GITS_TYPER);
+ its->base = its_base;
+ its->phys_base = res->start;
+
+ its->numa_node = numa_node;
+ its->fwnode_handle = handle;
+
+ return its;
+
+out_unmap:
+ iounmap(its_base);
+ return NULL;
+}
+
+static void its_node_destroy(struct its_node *its)
+{
+ iounmap(its->base);
+ kfree(its);
+}
+
static int __init its_of_probe(struct device_node *node)
{
struct device_node *np;
struct resource res;
+ int err;
/*
* Make sure *all* the ITS are reset before we probe any, as
@@ -5369,8 +5393,6 @@ static int __init its_of_probe(struct device_node *node)
*/
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
- int err;
-
if (!of_device_is_available(np) ||
!of_property_read_bool(np, "msi-controller") ||
of_address_to_resource(np, 0, &res))
@@ -5383,6 +5405,8 @@ static int __init its_of_probe(struct device_node *node)
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
+ struct its_node *its;
+
if (!of_device_is_available(np))
continue;
if (!of_property_read_bool(np, "msi-controller")) {
@@ -5396,7 +5420,17 @@ static int __init its_of_probe(struct device_node *node)
continue;
}
- its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
+
+ its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
+ if (!its)
+ return -ENOMEM;
+
+ its_enable_quirks(its);
+ err = its_probe_one(its);
+ if (err) {
+ its_node_destroy(its);
+ return err;
+ }
}
return 0;
}
@@ -5508,6 +5542,7 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
{
struct acpi_madt_generic_translator *its_entry;
struct fwnode_handle *dom_handle;
+ struct its_node *its;
struct resource res;
int err;
@@ -5532,11 +5567,18 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
goto dom_err;
}
- err = its_probe_one(&res, dom_handle,
- acpi_get_its_numa_node(its_entry->translation_id));
+ its = its_node_init(&res, dom_handle,
+ acpi_get_its_numa_node(its_entry->translation_id));
+ if (!its) {
+ err = -ENOMEM;
+ goto node_err;
+ }
+
+ err = its_probe_one(its);
if (!err)
return 0;
+node_err:
iort_deregister_domain_token(its_entry->translation_id);
dom_err:
irq_domain_free_fwnode(dom_handle);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index eedfa8e9f077..f59ac9586b7b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1857,6 +1857,14 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
return true;
}
+static bool rd_set_non_coherent(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
+ return true;
+}
+
static const struct gic_quirk gic_quirks[] = {
{
.desc = "GICv3: Qualcomm MSM8996 broken firmware",
@@ -1924,6 +1932,11 @@ static const struct gic_quirk gic_quirks[] = {
.init = gic_enable_quirk_arm64_2941627,
},
{
+ .desc = "GICv3: non-coherent attribute",
+ .property = "dma-noncoherent",
+ .init = rd_set_non_coherent,
+ },
+ {
}
};
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 4bbfa2b0a4df..96f4e322ed6b 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -118,7 +118,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg &= ~(TSSEL_MASK << tssr_offset);
+ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
@@ -130,8 +130,8 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
unsigned int hw_irq = irqd_to_hwirq(d);
if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
+ unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d);
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
- unsigned long tint = (uintptr_t)d->chip_data;
u32 offset = hw_irq - IRQC_TINT_START;
u32 tssr_offset = TSSR_OFFSET(offset);
u8 tssr_index = TSSR_INDEX(offset);
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index 4adeee1bc391..e8d01b14ccdd 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -155,8 +155,16 @@ static int __init riscv_intc_init(struct device_node *node,
* for each INTC DT node. We only need to do INTC initialization
* for the INTC DT node belonging to boot CPU (or boot HART).
*/
- if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
+ if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
+ /*
+ * The INTC nodes of each CPU are suppliers for downstream
+ * interrupt controllers (such as PLIC, IMSIC and APLIC
+ * direct-mode), so we should mark an INTC node as initialized
+ * if we are not creating an IRQ domain for it.
+ */
+ fwnode_dev_initialized(of_fwnode_handle(node), true);
return 0;
+ }
return riscv_intc_init_common(of_node_to_fwnode(node));
}
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index d8ba5fba7450..971240e2e31b 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -460,6 +460,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = {
.map = irq_map_generic_chip,
.alloc = stm32_exti_alloc,
.free = stm32_exti_free,
+ .xlate = irq_domain_xlate_twocell,
};
static void stm32_irq_ack(struct irq_data *d)
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 8c581c985aa7..7f314e58f3ce 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -12,6 +12,7 @@
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
#include <linux/of.h>
#include <asm/mxregs.h>
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index a32c0d28d038..74b2f124116e 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -22,9 +22,20 @@
#define PDC_MAX_GPIO_IRQS 256
+/* Valid only on HW version < 3.2 */
#define IRQ_ENABLE_BANK 0x10
#define IRQ_i_CFG 0x110
+/* Valid only on HW version >= 3.2 */
+#define IRQ_i_CFG_IRQ_ENABLE 3
+
+#define IRQ_i_CFG_TYPE_MASK GENMASK(2, 0)
+
+#define PDC_VERSION_REG 0x1000
+
+/* Notable PDC versions */
+#define PDC_VERSION_3_2 0x30200
+
struct pdc_pin_region {
u32 pin_base;
u32 parent_base;
@@ -37,6 +48,7 @@ static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
static struct pdc_pin_region *pdc_region;
static int pdc_region_cnt;
+static unsigned int pdc_version;
static void pdc_reg_write(int reg, u32 i, u32 val)
{
@@ -48,20 +60,32 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
-static void pdc_enable_intr(struct irq_data *d, bool on)
+static void __pdc_enable_intr(int pin_out, bool on)
{
- int pin_out = d->hwirq;
unsigned long enable;
- unsigned long flags;
- u32 index, mask;
- index = pin_out / 32;
- mask = pin_out % 32;
+ if (pdc_version < PDC_VERSION_3_2) {
+ u32 index, mask;
+
+ index = pin_out / 32;
+ mask = pin_out % 32;
+
+ enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
+ __assign_bit(mask, &enable, on);
+ pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+ } else {
+ enable = pdc_reg_read(IRQ_i_CFG, pin_out);
+ __assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on);
+ pdc_reg_write(IRQ_i_CFG, pin_out, enable);
+ }
+}
+
+static void pdc_enable_intr(struct irq_data *d, bool on)
+{
+ unsigned long flags;
raw_spin_lock_irqsave(&pdc_lock, flags);
- enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
- __assign_bit(mask, &enable, on);
- pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+ __pdc_enable_intr(d->hwirq, on);
raw_spin_unlock_irqrestore(&pdc_lock, flags);
}
@@ -142,6 +166,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
}
old_pdc_type = pdc_reg_read(IRQ_i_CFG, d->hwirq);
+ pdc_type |= (old_pdc_type & ~IRQ_i_CFG_TYPE_MASK);
pdc_reg_write(IRQ_i_CFG, d->hwirq, pdc_type);
ret = irq_chip_set_type_parent(d, type);
@@ -246,7 +271,6 @@ static const struct irq_domain_ops qcom_pdc_ops = {
static int pdc_setup_pin_mapping(struct device_node *np)
{
int ret, n, i;
- u32 irq_index, reg_index, val;
n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32));
if (n <= 0 || n % 3)
@@ -276,29 +300,38 @@ static int pdc_setup_pin_mapping(struct device_node *np)
if (ret)
return ret;
- for (i = 0; i < pdc_region[n].cnt; i++) {
- reg_index = (i + pdc_region[n].pin_base) >> 5;
- irq_index = (i + pdc_region[n].pin_base) & 0x1f;
- val = pdc_reg_read(IRQ_ENABLE_BANK, reg_index);
- val &= ~BIT(irq_index);
- pdc_reg_write(IRQ_ENABLE_BANK, reg_index, val);
- }
+ for (i = 0; i < pdc_region[n].cnt; i++)
+ __pdc_enable_intr(i + pdc_region[n].pin_base, 0);
}
return 0;
}
+#define QCOM_PDC_SIZE 0x30000
+
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
{
struct irq_domain *parent_domain, *pdc_domain;
+ resource_size_t res_size;
+ struct resource res;
int ret;
- pdc_base = of_iomap(node, 0);
+ /* compat with old sm8150 DT which had a very small region for PDC */
+ if (of_address_to_resource(node, 0, &res))
+ return -EINVAL;
+
+ res_size = max_t(resource_size_t, resource_size(&res), QCOM_PDC_SIZE);
+ if (res_size > resource_size(&res))
+ pr_warn("%pOF: invalid reg size, please fix DT\n", node);
+
+ pdc_base = ioremap(res.start, res_size);
if (!pdc_base) {
pr_err("%pOF: unable to map PDC registers\n", node);
return -ENXIO;
}
+ pdc_version = pdc_reg_read(PDC_VERSION_REG, 0);
+
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("%pOF: unable to find PDC's parent domain\n", node);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6046dfeca16f..b92208eccdea 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -521,6 +521,15 @@ config LEDS_PCA963X
LED driver chip accessed via the I2C bus. Supported
devices include PCA9633 and PCA9634
+config LEDS_PCA995X
+ tristate "LED Support for PCA995x I2C chips"
+ depends on LEDS_CLASS
+ depends on I2C
+ help
+ This option enables support for LEDs connected to PCA995x
+ LED driver chips accessed via the I2C bus. Supported
+ devices include PCA9955BTW, PCA9952TW and PCA9955TW.
+
config LEDS_WM831X_STATUS
tristate "LED support for status LEDs on WM831x PMICs"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index d71f1226540c..d7348e8bc019 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
+obj-$(CONFIG_LEDS_PCA995X) += leds-pca995x.o
obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
index 945c84286a4e..bdcb7377cd4e 100644
--- a/drivers/leds/blink/Kconfig
+++ b/drivers/leds/blink/Kconfig
@@ -1,10 +1,10 @@
config LEDS_BCM63138
tristate "LED Support for Broadcom BCM63138 SoC"
depends on LEDS_CLASS
- depends on ARCH_BCM4908 || ARCH_BCM_5301X || BCM63XX || COMPILE_TEST
+ depends on ARCH_BCMBCA || ARCH_BCM_5301X || BCM63XX || COMPILE_TEST
depends on HAS_IOMEM
depends on OF
- default ARCH_BCM4908
+ default ARCH_BCMBCA
help
This option enables support for LED controller that is part of
BCM63138 SoC. The same hardware block is known to be also used
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index 4ed2efc65434..4e08dbc05709 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -89,6 +89,8 @@ config LEDS_QCOM_FLASH
the total LED current will be split symmetrically on each channel and
they will be enabled/disabled at the same time.
+ This driver can be built as a module; it will be called "leds-qcom-flash".
+
config LEDS_RT4505
tristate "LED support for RT4505 flashlight controller"
depends on I2C && OF
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index b089ca1a1901..a73d3ea5c97a 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -309,6 +309,10 @@ static int qcom_flash_strobe_set(struct led_classdev_flash *fled_cdev, bool stat
struct qcom_flash_led *led = flcdev_to_qcom_fled(fled_cdev);
int rc;
+ rc = set_flash_strobe(led, SW_STROBE, false);
+ if (rc)
+ return rc;
+
rc = set_flash_current(led, led->flash_current_ma, FLASH_MODE);
if (rc)
return rc;
@@ -745,6 +749,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
return 0;
release:
+ fwnode_handle_put(child);
while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count)
v4l2_flash_release(flash_data->v4l2_flash[flash_data->leds_count--]);
return rc;
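The fwnode_handle_put() added above follows a common pattern: leaving a
device_for_each_child_node() loop early keeps a reference on the current child
that must be dropped on the error path. A generic sketch of that pattern, with
hypothetical example_* names standing in for the driver specifics:

#include <linux/property.h>	/* device_for_each_child_node(), fwnode_handle_put() */

/* Placeholder for per-child setup; the real driver logic goes here. */
static int example_init_one(struct device *dev, struct fwnode_handle *child)
{
	return 0;
}

static int example_probe_children(struct device *dev)
{
	struct fwnode_handle *child;
	int ret;

	device_for_each_child_node(dev, child) {
		ret = example_init_one(dev, child);
		if (ret) {
			/*
			 * The iterator holds a reference on @child; drop it
			 * before bailing out, as the fix above does.
			 */
			fwnode_handle_put(child);
			return ret;
		}
	}
	return 0;
}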
diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
index e317408583df..ec62a4811613 100644
--- a/drivers/leds/led-class-multicolor.c
+++ b/drivers/leds/led-class-multicolor.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/led-class-multicolor.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -19,9 +20,10 @@ int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
int i;
for (i = 0; i < mcled_cdev->num_colors; i++)
- mcled_cdev->subled_info[i].brightness = brightness *
- mcled_cdev->subled_info[i].intensity /
- led_cdev->max_brightness;
+ mcled_cdev->subled_info[i].brightness =
+ DIV_ROUND_CLOSEST(brightness *
+ mcled_cdev->subled_info[i].intensity,
+ led_cdev->max_brightness);
return 0;
}
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 6dae56b914fe..974b84f6bd6a 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -22,7 +22,6 @@
#include <linux/of.h>
#include "leds.h"
-static struct class *leds_class;
static DEFINE_MUTEX(leds_lookup_lock);
static LIST_HEAD(leds_lookup_list);
@@ -76,6 +75,19 @@ static ssize_t max_brightness_show(struct device *dev,
}
static DEVICE_ATTR_RO(max_brightness);
+static ssize_t color_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *color_text = "invalid";
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ if (led_cdev->color < LED_COLOR_ID_MAX)
+ color_text = led_colors[led_cdev->color];
+
+ return sysfs_emit(buf, "%s\n", color_text);
+}
+static DEVICE_ATTR_RO(color);
+
#ifdef CONFIG_LEDS_TRIGGERS
static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
static struct bin_attribute *led_trigger_bin_attrs[] = {
@@ -90,6 +102,7 @@ static const struct attribute_group led_trigger_group = {
static struct attribute *led_class_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_max_brightness.attr,
+ &dev_attr_color.attr,
NULL,
};
@@ -234,6 +247,12 @@ static struct led_classdev *led_module_get(struct device *led_dev)
return led_cdev;
}
+static const struct class leds_class = {
+ .name = "leds",
+ .dev_groups = led_groups,
+ .pm = &leds_class_dev_pm_ops,
+};
+
/**
* of_led_get() - request a LED device via the LED framework
* @np: device node to get the LED device from
@@ -251,7 +270,7 @@ struct led_classdev *of_led_get(struct device_node *np, int index)
if (!led_node)
return ERR_PTR(-ENOENT);
- led_dev = class_find_device_by_of_node(leds_class, led_node);
+ led_dev = class_find_device_by_of_node(&leds_class, led_node);
of_node_put(led_node);
put_device(led_dev);
@@ -346,7 +365,7 @@ struct led_classdev *led_get(struct device *dev, char *con_id)
if (!provider)
return ERR_PTR(-ENOENT);
- led_dev = class_find_device_by_name(leds_class, provider);
+ led_dev = class_find_device_by_name(&leds_class, provider);
kfree_const(provider);
return led_module_get(led_dev);
@@ -402,6 +421,31 @@ void led_remove_lookup(struct led_lookup_data *led_lookup)
}
EXPORT_SYMBOL_GPL(led_remove_lookup);
+/**
+ * devm_of_led_get_optional - Resource-managed request of an optional LED device
+ * @dev: LED consumer
+ * @index: index of the LED to obtain in the consumer
+ *
+ * The device node of the device is parsed to find the requested LED device.
+ * The LED device returned from this function is automatically released
+ * on driver detach.
+ *
+ * @return a pointer to an LED device, ERR_PTR(errno) on failure, or NULL if
+ * the LED was not found.
+ */
+struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
+ int index)
+{
+ struct led_classdev *led;
+
+ led = devm_of_led_get(dev, index);
+ if (IS_ERR(led) && PTR_ERR(led) == -ENOENT)
+ return NULL;
+
+ return led;
+}
+EXPORT_SYMBOL_GPL(devm_of_led_get_optional);
+
static int led_classdev_next_name(const char *init_name, char *name,
size_t len)
{
@@ -412,7 +456,7 @@ static int led_classdev_next_name(const char *init_name, char *name,
strscpy(name, init_name, len);
while ((ret < len) &&
- (dev = class_find_device_by_name(leds_class, name))) {
+ (dev = class_find_device_by_name(&leds_class, name))) {
put_device(dev);
ret = snprintf(name, len, "%s_%u", init_name, ++i);
}
@@ -457,6 +501,14 @@ int led_classdev_register_ext(struct device *parent,
if (fwnode_property_present(init_data->fwnode,
"retain-state-shutdown"))
led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+
+ fwnode_property_read_u32(init_data->fwnode,
+ "max-brightness",
+ &led_cdev->max_brightness);
+
+ if (fwnode_property_present(init_data->fwnode, "color"))
+ fwnode_property_read_u32(init_data->fwnode, "color",
+ &led_cdev->color);
}
} else {
proposed_name = led_cdev->name;
@@ -466,10 +518,13 @@ int led_classdev_register_ext(struct device *parent,
if (ret < 0)
return ret;
+ if (led_cdev->color >= LED_COLOR_ID_MAX)
+ dev_warn(parent, "LED %s color identifier out of range\n", final_name);
+
mutex_init(&led_cdev->led_access);
mutex_lock(&led_cdev->led_access);
- led_cdev->dev = device_create_with_groups(leds_class, parent, 0,
- led_cdev, led_cdev->groups, "%s", final_name);
+ led_cdev->dev = device_create_with_groups(&leds_class, parent, 0,
+ led_cdev, led_cdev->groups, "%s", final_name);
if (IS_ERR(led_cdev->dev)) {
mutex_unlock(&led_cdev->led_access);
return PTR_ERR(led_cdev->dev);
@@ -626,17 +681,12 @@ EXPORT_SYMBOL_GPL(devm_led_classdev_unregister);
static int __init leds_init(void)
{
- leds_class = class_create("leds");
- if (IS_ERR(leds_class))
- return PTR_ERR(leds_class);
- leds_class->pm = &leds_class_dev_pm_ops;
- leds_class->dev_groups = led_groups;
- return 0;
+ return class_register(&leds_class);
}
static void __exit leds_exit(void)
{
- class_destroy(leds_class);
+ class_unregister(&leds_class);
}
subsys_initcall(leds_init);
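A minimal sketch of a consumer of the devm_of_led_get_optional() helper added
above. The example_* driver code and the led_set_brightness() call are
assumptions; only the helper's contract (ERR_PTR on failure, NULL when the LED
is absent) comes from this patch.

#include <linux/leds.h>	/* assumed home of the devm_of_led_get_optional() declaration */

static int example_consumer_probe(struct device *dev)
{
	struct led_classdev *led;

	/* NULL simply means the optional LED is not described for this device. */
	led = devm_of_led_get_optional(dev, 0);
	if (IS_ERR(led))
		return PTR_ERR(led);

	if (led)
		led_set_brightness(led, LED_FULL);

	return 0;
}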
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index b9b1295833c9..214ed81eb0e9 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -474,10 +474,6 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
struct fwnode_handle *fwnode = init_data->fwnode;
const char *devicename = init_data->devicename;
- /* We want to label LEDs that can produce full range of colors
- * as RGB, not multicolor */
- BUG_ON(props.color == LED_COLOR_ID_MULTI);
-
if (!led_classdev_name)
return -EINVAL;
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index 24b1041213c2..0216afed3b6e 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -344,7 +344,7 @@ MODULE_DEVICE_TABLE(i2c, an30259a_id);
static struct i2c_driver an30259a_driver = {
.driver = {
.name = "leds-an30259a",
- .of_match_table = of_match_ptr(an30259a_match_table),
+ .of_match_table = an30259a_match_table,
},
.probe = an30259a_probe,
.remove = an30259a_remove,
diff --git a/drivers/leds/leds-ariel.c b/drivers/leds/leds-ariel.c
index 49e1bddaa15e..dd319c7e385f 100644
--- a/drivers/leds/leds-ariel.c
+++ b/drivers/leds/leds-ariel.c
@@ -7,8 +7,8 @@
#include <linux/module.h>
#include <linux/leds.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/of_platform.h>
enum ec_index {
EC_BLUE_LED = 0x01,
diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
index 96979b8e09b7..691a743cc9b0 100644
--- a/drivers/leds/leds-aw200xx.c
+++ b/drivers/leds/leds-aw200xx.c
@@ -368,7 +368,7 @@ static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
if (!chip->display_rows ||
chip->display_rows > chip->cdef->display_size_rows_max) {
- return dev_err_probe(dev, ret,
+ return dev_err_probe(dev, -EINVAL,
"Invalid leds display size %u\n",
chip->display_rows);
}
@@ -583,7 +583,7 @@ static struct i2c_driver aw200xx_driver = {
.name = "aw200xx",
.of_match_table = aw200xx_match_table,
},
- .probe_new = aw200xx_probe,
+ .probe = aw200xx_probe,
.remove = aw200xx_remove,
.id_table = aw200xx_id,
};
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 59765640b70f..91f44b23cb11 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -62,7 +62,7 @@ struct aw2013_led {
struct aw2013 {
struct mutex mutex; /* held when writing to registers */
- struct regulator *vcc_regulator;
+ struct regulator_bulk_data regulators[2];
struct i2c_client *client;
struct aw2013_led leds[AW2013_MAX_LEDS];
struct regmap *regmap;
@@ -106,10 +106,11 @@ static void aw2013_chip_disable(struct aw2013 *chip)
regmap_write(chip->regmap, AW2013_GCR, 0);
- ret = regulator_disable(chip->vcc_regulator);
+ ret = regulator_bulk_disable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
if (ret) {
dev_err(&chip->client->dev,
- "Failed to disable regulator: %d\n", ret);
+ "Failed to disable regulators: %d\n", ret);
return;
}
@@ -123,10 +124,11 @@ static int aw2013_chip_enable(struct aw2013 *chip)
if (chip->enabled)
return 0;
- ret = regulator_enable(chip->vcc_regulator);
+ ret = regulator_bulk_enable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
if (ret) {
dev_err(&chip->client->dev,
- "Failed to enable regulator: %d\n", ret);
+ "Failed to enable regulators: %d\n", ret);
return ret;
}
chip->enabled = true;
@@ -348,19 +350,23 @@ static int aw2013_probe(struct i2c_client *client)
goto error;
}
- chip->vcc_regulator = devm_regulator_get(&client->dev, "vcc");
- ret = PTR_ERR_OR_ZERO(chip->vcc_regulator);
- if (ret) {
+ chip->regulators[0].supply = "vcc";
+ chip->regulators[1].supply = "vio";
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(chip->regulators),
+ chip->regulators);
+ if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(&client->dev,
- "Failed to request regulator: %d\n", ret);
+ "Failed to request regulators: %d\n", ret);
goto error;
}
- ret = regulator_enable(chip->vcc_regulator);
+ ret = regulator_bulk_enable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
if (ret) {
dev_err(&client->dev,
- "Failed to enable regulator: %d\n", ret);
+ "Failed to enable regulators: %d\n", ret);
goto error;
}
@@ -382,10 +388,11 @@ static int aw2013_probe(struct i2c_client *client)
if (ret < 0)
goto error_reg;
- ret = regulator_disable(chip->vcc_regulator);
+ ret = regulator_bulk_disable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
if (ret) {
dev_err(&client->dev,
- "Failed to disable regulator: %d\n", ret);
+ "Failed to disable regulators: %d\n", ret);
goto error;
}
@@ -394,7 +401,8 @@ static int aw2013_probe(struct i2c_client *client)
return 0;
error_reg:
- regulator_disable(chip->vcc_regulator);
+ regulator_bulk_disable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
error:
mutex_destroy(&chip->mutex);
@@ -420,7 +428,7 @@ MODULE_DEVICE_TABLE(of, aw2013_match_table);
static struct i2c_driver aw2013_driver = {
.driver = {
.name = "leds-aw2013",
- .of_match_table = of_match_ptr(aw2013_match_table),
+ .of_match_table = aw2013_match_table,
},
.probe = aw2013_probe,
.remove = aw2013_remove,
diff --git a/drivers/leds/leds-cpcap.c b/drivers/leds/leds-cpcap.c
index 7d41ce8c9bb1..87354f17644b 100644
--- a/drivers/leds/leds-cpcap.c
+++ b/drivers/leds/leds-cpcap.c
@@ -7,7 +7,7 @@
#include <linux/mfd/motorola-cpcap.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/leds/leds-cr0014114.c b/drivers/leds/leds-cr0014114.c
index c87686bd7c18..b33bca397ea6 100644
--- a/drivers/leds/leds-cr0014114.c
+++ b/drivers/leds/leds-cr0014114.c
@@ -4,8 +4,8 @@
#include <linux/delay.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
diff --git a/drivers/leds/leds-ip30.c b/drivers/leds/leds-ip30.c
index 1f952bad0fe8..2df24c303366 100644
--- a/drivers/leds/leds-ip30.c
+++ b/drivers/leds/leds-ip30.c
@@ -27,22 +27,16 @@ static void ip30led_set(struct led_classdev *led_cdev,
static int ip30led_create(struct platform_device *pdev, int num)
{
- struct resource *res;
struct ip30_led *data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, num);
- if (!res)
- return -EBUSY;
-
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->reg = devm_ioremap_resource(&pdev->dev, res);
+ data->reg = devm_platform_ioremap_resource(pdev, num);
if (IS_ERR(data->reg))
return PTR_ERR(data->reg);
-
switch (num) {
case IP30_LED_SYSTEM:
data->cdev.name = "white:power";
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 72cb56d305c4..b0a0be77bb33 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -15,7 +15,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
/* Used to indicate a device has no such register */
#define IS31FL32XX_REG_NONE 0xFF
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 030c040fdf6d..2ef19ad23b1d 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -594,18 +594,17 @@ static const struct i2c_device_id lp5521_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp5521_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp5521_leds_match[] = {
{ .compatible = "national,lp5521", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp5521_leds_match);
-#endif
+
static struct i2c_driver lp5521_driver = {
.driver = {
.name = "lp5521",
- .of_match_table = of_match_ptr(of_lp5521_leds_match),
+ .of_match_table = of_lp5521_leds_match,
},
.probe = lp5521_probe,
.remove = lp5521_remove,
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index daa6a165fba6..38de853f9939 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -972,7 +972,6 @@ static const struct i2c_device_id lp5523_id[] = {
MODULE_DEVICE_TABLE(i2c, lp5523_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp5523_leds_match[] = {
{ .compatible = "national,lp5523", },
{ .compatible = "ti,lp55231", },
@@ -980,12 +979,11 @@ static const struct of_device_id of_lp5523_leds_match[] = {
};
MODULE_DEVICE_TABLE(of, of_lp5523_leds_match);
-#endif
static struct i2c_driver lp5523_driver = {
.driver = {
.name = "lp5523x",
- .of_match_table = of_match_ptr(of_lp5523_leds_match),
+ .of_match_table = of_lp5523_leds_match,
},
.probe = lp5523_probe,
.remove = lp5523_remove,
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 4565cc12cea8..39db9aeb67c5 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -589,19 +589,17 @@ static const struct i2c_device_id lp5562_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp5562_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp5562_leds_match[] = {
{ .compatible = "ti,lp5562", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp5562_leds_match);
-#endif
static struct i2c_driver lp5562_driver = {
.driver = {
.name = "lp5562",
- .of_match_table = of_match_ptr(of_lp5562_leds_match),
+ .of_match_table = of_lp5562_leds_match,
},
.probe = lp5562_probe,
.remove = lp5562_remove,
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index f11886aa8965..ac50aa88939a 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -380,19 +380,17 @@ static const struct i2c_device_id lp8501_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp8501_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp8501_leds_match[] = {
{ .compatible = "ti,lp8501", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp8501_leds_match);
-#endif
static struct i2c_driver lp8501_driver = {
.driver = {
.name = "lp8501",
- .of_match_table = of_match_ptr(of_lp8501_leds_match),
+ .of_match_table = of_lp8501_leds_match,
},
.probe = lp8501_probe,
.remove = lp8501_remove,
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
index b7855c93bd72..39210653acf7 100644
--- a/drivers/leds/leds-mlxreg.c
+++ b/drivers/leds/leds-mlxreg.c
@@ -8,7 +8,6 @@
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 1677d66d8b0e..f3010c472bbd 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -247,7 +247,7 @@ static int ns2_led_probe(struct platform_device *pdev)
if (!count)
return -ENODEV;
- leds = devm_kzalloc(dev, array_size(sizeof(*leds), count), GFP_KERNEL);
+ leds = devm_kcalloc(dev, count, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 8b5c62083e50..bf8bb8fc007c 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -18,7 +18,6 @@
#include <linux/leds-pca9532.h>
#include <linux/gpio/driver.h>
#include <linux/of.h>
-#include <linux/of_device.h>
/* m = num_leds*/
#define PCA9532_REG_INPUT(i) ((i) >> 3)
diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c
new file mode 100644
index 000000000000..78215dff1499
--- /dev/null
+++ b/drivers/leds/leds-pca995x.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LED driver for PCA995x I2C LED drivers
+ *
+ * Copyright 2011 bct electronic GmbH
+ * Copyright 2013 Qtechnology/AS
+ * Copyright 2022 NXP
+ * Copyright 2023 Marek Vasut
+ */
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+/* Register definition */
+#define PCA995X_MODE1 0x00
+#define PCA995X_MODE2 0x01
+#define PCA995X_LEDOUT0 0x02
+#define PCA9955B_PWM0 0x08
+#define PCA9952_PWM0 0x0A
+#define PCA9952_IREFALL 0x43
+#define PCA9955B_IREFALL 0x45
+
+/* Auto-increment disabled. Normal mode */
+#define PCA995X_MODE1_CFG 0x00
+
+/* LED select registers determine the source that drives LED outputs */
+#define PCA995X_LED_OFF 0x0
+#define PCA995X_LED_ON 0x1
+#define PCA995X_LED_PWM_MODE 0x2
+#define PCA995X_LDRX_MASK 0x3
+#define PCA995X_LDRX_BITS 2
+
+#define PCA995X_MAX_OUTPUTS 16
+#define PCA995X_OUTPUTS_PER_REG 4
+
+#define PCA995X_IREFALL_FULL_CFG 0xFF
+#define PCA995X_IREFALL_HALF_CFG (PCA995X_IREFALL_FULL_CFG / 2)
+
+#define PCA995X_TYPE_NON_B 0
+#define PCA995X_TYPE_B 1
+
+#define ldev_to_led(c) container_of(c, struct pca995x_led, ldev)
+
+struct pca995x_led {
+ unsigned int led_no;
+ struct led_classdev ldev;
+ struct pca995x_chip *chip;
+};
+
+struct pca995x_chip {
+ struct regmap *regmap;
+ struct pca995x_led leds[PCA995X_MAX_OUTPUTS];
+ int btype;
+};
+
+static int pca995x_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct pca995x_led *led = ldev_to_led(led_cdev);
+ struct pca995x_chip *chip = led->chip;
+ u8 ledout_addr, pwmout_addr;
+ int shift, ret;
+
+ pwmout_addr = (chip->btype ? PCA9955B_PWM0 : PCA9952_PWM0) + led->led_no;
+ ledout_addr = PCA995X_LEDOUT0 + (led->led_no / PCA995X_OUTPUTS_PER_REG);
+ shift = PCA995X_LDRX_BITS * (led->led_no % PCA995X_OUTPUTS_PER_REG);
+
+ switch (brightness) {
+ case LED_FULL:
+ return regmap_update_bits(chip->regmap, ledout_addr,
+ PCA995X_LDRX_MASK << shift,
+ PCA995X_LED_ON << shift);
+ case LED_OFF:
+ return regmap_update_bits(chip->regmap, ledout_addr,
+ PCA995X_LDRX_MASK << shift, 0);
+ default:
+ /* Adjust brightness as per user input by changing individual PWM */
+ ret = regmap_write(chip->regmap, pwmout_addr, brightness);
+ if (ret)
+ return ret;
+
+ /*
+ * Change LDRx configuration to individual brightness via PWM.
+ * LED will stop blinking if it's doing so.
+ */
+ return regmap_update_bits(chip->regmap, ledout_addr,
+ PCA995X_LDRX_MASK << shift,
+ PCA995X_LED_PWM_MODE << shift);
+ }
+}
+
+static const struct regmap_config pca995x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x49,
+};
+
+static int pca995x_probe(struct i2c_client *client)
+{
+ struct fwnode_handle *led_fwnodes[PCA995X_MAX_OUTPUTS] = { 0 };
+ struct fwnode_handle *np, *child;
+ struct device *dev = &client->dev;
+ struct pca995x_chip *chip;
+ struct pca995x_led *led;
+ int i, btype, reg, ret;
+
+ btype = (unsigned long)device_get_match_data(&client->dev);
+
+ np = dev_fwnode(dev);
+ if (!np)
+ return -ENODEV;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->btype = btype;
+ chip->regmap = devm_regmap_init_i2c(client, &pca995x_regmap);
+ if (IS_ERR(chip->regmap))
+ return PTR_ERR(chip->regmap);
+
+ i2c_set_clientdata(client, chip);
+
+ fwnode_for_each_available_child_node(np, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret) {
+ fwnode_handle_put(child);
+ return ret;
+ }
+
+ if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) {
+ fwnode_handle_put(child);
+ return -EINVAL;
+ }
+
+ led = &chip->leds[reg];
+ led_fwnodes[reg] = child;
+ led->chip = chip;
+ led->led_no = reg;
+ led->ldev.brightness_set_blocking = pca995x_brightness_set;
+ led->ldev.max_brightness = 255;
+ }
+
+ for (i = 0; i < PCA995X_MAX_OUTPUTS; i++) {
+ struct led_init_data init_data = {};
+
+ if (!led_fwnodes[i])
+ continue;
+
+ init_data.fwnode = led_fwnodes[i];
+
+ ret = devm_led_classdev_register_ext(dev,
+ &chip->leds[i].ldev,
+ &init_data);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, ret,
+ "Could not register LED %s\n",
+ chip->leds[i].ldev.name);
+ }
+ }
+
+ /* Disable LED all-call address and set normal mode */
+ ret = regmap_write(chip->regmap, PCA995X_MODE1, PCA995X_MODE1_CFG);
+ if (ret)
+ return ret;
+
+ /* IREF Output current value for all LEDn outputs */
+ return regmap_write(chip->regmap,
+ btype ? PCA9955B_IREFALL : PCA9952_IREFALL,
+ PCA995X_IREFALL_HALF_CFG);
+}
+
+static const struct i2c_device_id pca995x_id[] = {
+ { "pca9952", .driver_data = (kernel_ulong_t)PCA995X_TYPE_NON_B },
+ { "pca9955b", .driver_data = (kernel_ulong_t)PCA995X_TYPE_B },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pca995x_id);
+
+static const struct of_device_id pca995x_of_match[] = {
+ { .compatible = "nxp,pca9952", .data = (void *)PCA995X_TYPE_NON_B },
+ { .compatible = "nxp,pca9955b", .data = (void *)PCA995X_TYPE_B },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pca995x_of_match);
+
+static struct i2c_driver pca995x_driver = {
+ .driver = {
+ .name = "leds-pca995x",
+ .of_match_table = pca995x_of_match,
+ },
+ .probe = pca995x_probe,
+ .id_table = pca995x_id,
+};
+module_i2c_driver(pca995x_driver);
+
+MODULE_AUTHOR("Isai Gaspar <isaiezequiel.gaspar@nxp.com>");
+MODULE_DESCRIPTION("PCA995x LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index b9233f14b646..3f49a5181892 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -4,7 +4,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 29194cc382af..419b710984ab 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
@@ -146,7 +146,7 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
led.name = to_of_node(fwnode)->name;
if (!led.name) {
- ret = EINVAL;
+ ret = -EINVAL;
goto err_child_out;
}
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index 2c7ffc3c78e6..9d91f21842f2 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -30,7 +30,7 @@
#include <linux/leds.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <uapi/linux/uleds.h>
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index e38abb5e60c1..360a376fa738 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -7,8 +7,7 @@
*/
#include <linux/io.h>
#include <linux/init.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stat.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-ti-lmu-common.c b/drivers/leds/leds-ti-lmu-common.c
index d7f10ad721ba..b2491666b5dc 100644
--- a/drivers/leds/leds-ti-lmu-common.c
+++ b/drivers/leds/leds-ti-lmu-common.c
@@ -7,7 +7,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/leds-ti-lmu-common.h>
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index dfc6fb2b3e52..945e831ef4ac 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -8,7 +8,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 64b2d7b6d3f3..b8a95a917cfa 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -156,24 +156,20 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
- struct omnia_leds *leds = i2c_get_clientdata(client);
int ret;
- mutex_lock(&leds->lock);
ret = i2c_smbus_read_byte_data(client, CMD_LED_GET_BRIGHTNESS);
- mutex_unlock(&leds->lock);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
- struct omnia_leds *leds = i2c_get_clientdata(client);
unsigned long brightness;
int ret;
@@ -183,15 +179,10 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
if (brightness > 100)
return -EINVAL;
- mutex_lock(&leds->lock);
ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS,
(u8)brightness);
- mutex_unlock(&leds->lock);
-
- if (ret < 0)
- return ret;
- return count;
+ return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(brightness);
diff --git a/drivers/leds/rgb/Kconfig b/drivers/leds/rgb/Kconfig
index 360c8679c6e2..183bccc06cf3 100644
--- a/drivers/leds/rgb/Kconfig
+++ b/drivers/leds/rgb/Kconfig
@@ -2,6 +2,18 @@
if LEDS_CLASS_MULTICOLOR
+config LEDS_GROUP_MULTICOLOR
+ tristate "LEDs group multi-color support"
+ depends on OF || COMPILE_TEST
+ help
+ This option enables support for monochrome LEDs that are grouped
+ into multicolor LEDs, which is useful when LEDs of different
+ colors are physically grouped in a single multi-color LED and
+ driven by a controller that doesn't have multi-color support.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-group-multicolor.
+
config LEDS_PWM_MULTICOLOR
tristate "PWM driven multi-color LED Support"
depends on PWM
diff --git a/drivers/leds/rgb/Makefile b/drivers/leds/rgb/Makefile
index 8c01daf63f61..c11cc56384e7 100644
--- a/drivers/leds/rgb/Makefile
+++ b/drivers/leds/rgb/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_GROUP_MULTICOLOR) += leds-group-multicolor.o
obj-$(CONFIG_LEDS_PWM_MULTICOLOR) += leds-pwm-multicolor.o
obj-$(CONFIG_LEDS_QCOM_LPG) += leds-qcom-lpg.o
obj-$(CONFIG_LEDS_MT6370_RGB) += leds-mt6370-rgb.o
diff --git a/drivers/leds/rgb/leds-group-multicolor.c b/drivers/leds/rgb/leds-group-multicolor.c
new file mode 100644
index 000000000000..39f58be32af5
--- /dev/null
+++ b/drivers/leds/rgb/leds-group-multicolor.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Multi-color LED built with monochromatic LED devices
+ *
+ * This driver groups several monochromatic LED devices in a single multicolor LED device.
+ *
+ * Compared to handling this grouping in user-space, the benefits are:
+ * - The state of the monochromatic LEDs relative to each other is always consistent.
+ * - The sysfs interface of the LEDs can be used for the group as a whole.
+ *
+ * Copyright 2023 Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+ */
+
+#include <linux/err.h>
+#include <linux/leds.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+struct leds_multicolor {
+ struct led_classdev_mc mc_cdev;
+ struct led_classdev **monochromatics;
+};
+
+static int leds_gmc_set(struct led_classdev *cdev, enum led_brightness brightness)
+{
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
+ struct leds_multicolor *priv = container_of(mc_cdev, struct leds_multicolor, mc_cdev);
+ const unsigned int group_max_brightness = mc_cdev->led_cdev.max_brightness;
+ int i;
+
+ for (i = 0; i < mc_cdev->num_colors; i++) {
+ struct led_classdev *mono = priv->monochromatics[i];
+ const unsigned int mono_max_brightness = mono->max_brightness;
+ unsigned int intensity = mc_cdev->subled_info[i].intensity;
+ int mono_brightness;
+
+ /*
+ * Scale the brightness according to relative intensity of the
+ * color AND the max brightness of the monochromatic LED.
+ */
+ mono_brightness = DIV_ROUND_CLOSEST(brightness * intensity * mono_max_brightness,
+ group_max_brightness * group_max_brightness);
+
+ led_set_brightness(mono, mono_brightness);
+ }
+
+ return 0;
+}
+
+static void restore_sysfs_write_access(void *data)
+{
+ struct led_classdev *led_cdev = data;
+
+ /* Restore the write access to the LED */
+ mutex_lock(&led_cdev->led_access);
+ led_sysfs_enable(led_cdev);
+ mutex_unlock(&led_cdev->led_access);
+}
+
+static int leds_gmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct led_init_data init_data = {};
+ struct led_classdev *cdev;
+ struct mc_subled *subled;
+ struct leds_multicolor *priv;
+ unsigned int max_brightness = 0;
+ int i, ret, count = 0;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ for (;;) {
+ struct led_classdev *led_cdev;
+
+ led_cdev = devm_of_led_get_optional(dev, count);
+ if (IS_ERR(led_cdev))
+ return dev_err_probe(dev, PTR_ERR(led_cdev), "Unable to get LED #%d",
+ count);
+ if (!led_cdev)
+ break;
+
+ priv->monochromatics = devm_krealloc_array(dev, priv->monochromatics,
+ count + 1, sizeof(*priv->monochromatics),
+ GFP_KERNEL);
+ if (!priv->monochromatics)
+ return -ENOMEM;
+
+ priv->monochromatics[count] = led_cdev;
+
+ max_brightness = max(max_brightness, led_cdev->max_brightness);
+
+ count++;
+ }
+
+ subled = devm_kcalloc(dev, count, sizeof(*subled), GFP_KERNEL);
+ if (!subled)
+ return -ENOMEM;
+ priv->mc_cdev.subled_info = subled;
+
+ for (i = 0; i < count; i++) {
+ struct led_classdev *led_cdev = priv->monochromatics[i];
+
+ subled[i].color_index = led_cdev->color;
+
+ /* Configure the LED intensity to its maximum */
+ subled[i].intensity = max_brightness;
+ }
+
+ /* Initialise the multicolor LED class device */
+ cdev = &priv->mc_cdev.led_cdev;
+ cdev->flags = LED_CORE_SUSPENDRESUME;
+ cdev->brightness_set_blocking = leds_gmc_set;
+ cdev->max_brightness = max_brightness;
+ cdev->color = LED_COLOR_ID_MULTI;
+ priv->mc_cdev.num_colors = count;
+
+ init_data.fwnode = dev_fwnode(dev);
+ ret = devm_led_classdev_multicolor_register_ext(dev, &priv->mc_cdev, &init_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register multicolor LED for %s.\n",
+ cdev->name);
+
+ ret = leds_gmc_set(cdev, cdev->brightness);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set LED value for %s.", cdev->name);
+
+ for (i = 0; i < count; i++) {
+ struct led_classdev *led_cdev = priv->monochromatics[i];
+
+ /*
+ * Make the individual LED sysfs interface read-only to prevent the user
+ * from changing the brightness of the individual LEDs of the group.
+ */
+ mutex_lock(&led_cdev->led_access);
+ led_sysfs_disable(led_cdev);
+ mutex_unlock(&led_cdev->led_access);
+
+ /* Restore the write access to the LED sysfs when the group is destroyed */
+ devm_add_action_or_reset(dev, restore_sysfs_write_access, led_cdev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id of_leds_group_multicolor_match[] = {
+ { .compatible = "leds-group-multicolor" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_leds_group_multicolor_match);
+
+static struct platform_driver leds_group_multicolor_driver = {
+ .probe = leds_gmc_probe,
+ .driver = {
+ .name = "leds_group_multicolor",
+ .of_match_table = of_leds_group_multicolor_match,
+ }
+};
+module_platform_driver(leds_group_multicolor_driver);
+
+MODULE_AUTHOR("Jean-Jacques Hiblot <jjhiblot@traphandler.com>");
+MODULE_DESCRIPTION("LEDs group multicolor driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-group-multicolor");
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 59581b3e25ca..df469aaa7e6e 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -9,7 +9,6 @@
#include <linux/led-class-multicolor.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -1093,7 +1092,6 @@ static int lpg_add_pwm(struct lpg *lpg)
{
int ret;
- lpg->pwm.base = -1;
lpg->pwm.dev = lpg->dev;
lpg->pwm.npwm = lpg->num_channels;
lpg->pwm.ops = &lpg_pwm_ops;
diff --git a/drivers/leds/simple/Kconfig b/drivers/leds/simple/Kconfig
index 609e438af9f6..e616cc6d6051 100644
--- a/drivers/leds/simple/Kconfig
+++ b/drivers/leds/simple/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config LEDS_SIEMENS_SIMATIC_IPC
tristate "LED driver for Siemens Simatic IPCs"
+ depends on LEDS_CLASS
depends on SIEMENS_SIMATIC_IPC
default y
help
@@ -35,3 +36,16 @@ config LEDS_SIEMENS_SIMATIC_IPC_F7188X
To compile this driver as a module, choose M here: the module
will be called simatic-ipc-leds-gpio-f7188x.
+
+config LEDS_SIEMENS_SIMATIC_IPC_ELKHARTLAKE
+ tristate "LED driver for Siemens Simatic IPCs based on Intel Elkhart Lake GPIO"
+ depends on LEDS_GPIO
+ depends on PINCTRL_ELKHARTLAKE
+ depends on SIEMENS_SIMATIC_IPC
+ default LEDS_SIEMENS_SIMATIC_IPC
+ help
+ This option enables support for the LEDs of several Industrial PCs
+ from Siemens based on Elkhart Lake GPIO, i.e. BX-21A.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-leds-gpio-elkhartlake.
diff --git a/drivers/leds/simple/Makefile b/drivers/leds/simple/Makefile
index e3e840cea275..783578f11bb0 100644
--- a/drivers/leds/simple/Makefile
+++ b/drivers/leds/simple/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds.o
obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC_APOLLOLAKE) += simatic-ipc-leds-gpio-core.o simatic-ipc-leds-gpio-apollolake.o
obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC_F7188X) += simatic-ipc-leds-gpio-core.o simatic-ipc-leds-gpio-f7188x.o
+obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC_ELKHARTLAKE) += simatic-ipc-leds-gpio-core.o simatic-ipc-leds-gpio-elkhartlake.o
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-core.c b/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
index 2a21b663df87..c552ea73ed9d 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
@@ -57,6 +57,7 @@ int simatic_ipc_leds_gpio_probe(struct platform_device *pdev,
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_127E:
case SIMATIC_IPC_DEVICE_227G:
+ case SIMATIC_IPC_DEVICE_BX_21A:
break;
default:
return -ENODEV;
@@ -72,6 +73,9 @@ int simatic_ipc_leds_gpio_probe(struct platform_device *pdev,
goto out;
}
+ if (!table_extra)
+ return 0;
+
table_extra->dev_id = dev_name(dev);
gpiod_add_lookup_table(table_extra);
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c b/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c
new file mode 100644
index 000000000000..6ba21dbb3ba0
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for GPIO based LEDs
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Author:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#include "simatic-ipc-leds-gpio.h"
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("INTC1020:04", 72, NULL, 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 77, NULL, 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 78, NULL, 2, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 58, NULL, 3, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 60, NULL, 4, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 62, NULL, 5, GPIO_ACTIVE_HIGH),
+ {} /* Terminating entry */
+ },
+};
+
+static int simatic_ipc_leds_gpio_elkhartlake_probe(struct platform_device *pdev)
+{
+ return simatic_ipc_leds_gpio_probe(pdev, &simatic_ipc_led_gpio_table,
+ NULL);
+}
+
+static int simatic_ipc_leds_gpio_elkhartlake_remove(struct platform_device *pdev)
+{
+ return simatic_ipc_leds_gpio_remove(pdev, &simatic_ipc_led_gpio_table,
+ NULL);
+}
+
+static struct platform_driver simatic_ipc_led_gpio_elkhartlake_driver = {
+ .probe = simatic_ipc_leds_gpio_elkhartlake_probe,
+ .remove = simatic_ipc_leds_gpio_elkhartlake_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+module_platform_driver(simatic_ipc_led_gpio_elkhartlake_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: simatic-ipc-leds-gpio-core platform:elkhartlake-pinctrl");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.h b/drivers/leds/simple/simatic-ipc-leds-gpio.h
index bf258c32f83d..3d4877aa4e0c 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio.h
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Siemens SIMATIC IPC driver for GPIO based LEDs
*
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index cc3261543a5e..58f3352539e8 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -609,18 +609,7 @@ static struct led_trigger netdev_led_trigger = {
.groups = netdev_trig_groups,
};
-static int __init netdev_trig_init(void)
-{
- return led_trigger_register(&netdev_led_trigger);
-}
-
-static void __exit netdev_trig_exit(void)
-{
- led_trigger_unregister(&netdev_led_trigger);
-}
-
-module_init(netdev_trig_init);
-module_exit(netdev_trig_exit);
+module_led_trigger(netdev_led_trigger);
MODULE_AUTHOR("Ben Whitten <ben.whitten@gmail.com>");
MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c
index f62db7e520b5..8ae0d2d284af 100644
--- a/drivers/leds/trigger/ledtrig-tty.c
+++ b/drivers/leds/trigger/ledtrig-tty.c
@@ -7,6 +7,8 @@
#include <linux/tty.h>
#include <uapi/linux/serial.h>
+#define LEDTRIG_TTY_INTERVAL 50
+
struct ledtrig_tty_data {
struct led_classdev *led_cdev;
struct delayed_work dwork;
@@ -122,17 +124,19 @@ static void ledtrig_tty_work(struct work_struct *work)
if (icount.rx != trigger_data->rx ||
icount.tx != trigger_data->tx) {
- led_set_brightness_sync(trigger_data->led_cdev, LED_ON);
+ unsigned long interval = LEDTRIG_TTY_INTERVAL;
+
+ led_blink_set_oneshot(trigger_data->led_cdev, &interval,
+ &interval, 0);
trigger_data->rx = icount.rx;
trigger_data->tx = icount.tx;
- } else {
- led_set_brightness_sync(trigger_data->led_cdev, LED_OFF);
}
out:
mutex_unlock(&trigger_data->mutex);
- schedule_delayed_work(&trigger_data->dwork, msecs_to_jiffies(100));
+ schedule_delayed_work(&trigger_data->dwork,
+ msecs_to_jiffies(LEDTRIG_TTY_INTERVAL * 2));
}
static struct attribute *ledtrig_tty_attrs[] = {
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
index 7320337b22d2..3d361c920030 100644
--- a/drivers/leds/uleds.c
+++ b/drivers/leds/uleds.c
@@ -209,17 +209,7 @@ static struct miscdevice uleds_misc = {
.name = ULEDS_NAME,
};
-static int __init uleds_init(void)
-{
- return misc_register(&uleds_misc);
-}
-module_init(uleds_init);
-
-static void __exit uleds_exit(void)
-{
- misc_deregister(&uleds_misc);
-}
-module_exit(uleds_exit);
+module_misc_device(uleds_misc);
MODULE_AUTHOR("David Lechner <david@lechnology.com>");
MODULE_DESCRIPTION("Userspace driver for the LED subsystem");
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index 22243cabe056..537f7bfb7b06 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#define INTR_STAT_OFS 0x0
#define INTR_SET_OFS 0x8
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
index aa0a4d83880f..27a510d46908 100644
--- a/drivers/mailbox/arm_mhu_db.c
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -15,7 +15,6 @@
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#define INTR_STAT_OFS 0x0
#define INTR_SET_OFS 0x8
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index bf6e86b0ed09..a2b8839d4e7c 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1501,16 +1501,12 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
mbox->dev = dev;
platform_set_drvdata(pdev, mbox);
- /* Get resource for registers */
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* Get resource for registers and map registers of all rings */
+ mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem);
if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
ret = -ENODEV;
goto fail;
- }
-
- /* Map registers of all rings */
- mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
- if (IS_ERR(mbox->regs)) {
+ } else if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
goto fail;
}
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 8c95e3ce295f..d67db63b482d 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -694,7 +694,7 @@ pdc_receive(struct pdc_state *pdcs)
* pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
* descriptors for a given SPU. The scatterlist buffers contain the data for a
* SPU request message.
- * @spu_idx: The index of the SPU to submit the request to, [0, max_spu)
+ * @pdcs: PDC state for the SPU that will process this request
* @sg: Scatterlist whose buffers contain part of the SPU request
*
* If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
@@ -861,7 +861,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
* pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
* descriptors for a given SPU. The caller must have already DMA mapped the
* scatterlist.
- * @spu_idx: Indicates which SPU the buffers are for
+ * @pdcs: PDC state for the SPU that will process this request
* @sg: Scatterlist whose buffers are added to the receive ring
*
* If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
@@ -960,7 +960,7 @@ static irqreturn_t pdc_irq_handler(int irq, void *data)
/**
* pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
* a DMA receive interrupt. Reenables the receive interrupt.
- * @data: PDC state structure
+ * @t: Pointer to the tasklet structure
*/
static void pdc_tasklet_cb(struct tasklet_struct *t)
{
@@ -1566,19 +1566,13 @@ static int pdc_probe(struct platform_device *pdev)
if (err)
goto cleanup_ring_pool;
- pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!pdc_regs) {
- err = -ENODEV;
- goto cleanup_ring_pool;
- }
- dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
- &pdc_regs->start, &pdc_regs->end);
-
- pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
+ pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
if (IS_ERR(pdcs->pdc_reg_vbase)) {
err = PTR_ERR(pdcs->pdc_reg_vbase);
goto cleanup_ring_pool;
}
+ dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
+ &pdc_regs->start, &pdc_regs->end);
/* create rx buffer pool after dt read to know how big buffers are */
err = pdc_rx_buf_pool_create(pdcs);
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
index ab24e731a782..17c29e960fbf 100644
--- a/drivers/mailbox/hi3660-mailbox.c
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -11,6 +11,7 @@
#include <linux/iopoll.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index 1c73c63598f5..f77741ce42e7 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -15,6 +15,7 @@
#include <linux/kfifo.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 20f2ec880ad6..3ef4dd8adf5d 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -14,7 +14,8 @@
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/slab.h>
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 162df49654fb..20ee283a04cc 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <soc/microchip/mpfs.h>
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index fc6a12a51b40..22d6018ceec3 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -367,8 +367,7 @@ static int mbox_test_probe(struct platform_device *pdev)
return -ENOMEM;
/* It's okay for MMIO to be NULL */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ tdev->tx_mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
/* if reserved area in SRAM, try just ioremap */
size = resource_size(res);
@@ -378,8 +377,7 @@ static int mbox_test_probe(struct platform_device *pdev)
}
/* If specified, second reg entry is Rx MMIO */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ tdev->rx_mmio = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
size = resource_size(res);
tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
@@ -390,7 +388,7 @@ static int mbox_test_probe(struct platform_device *pdev)
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
- if (!tdev->tx_channel && !tdev->rx_channel)
+ if (IS_ERR_OR_NULL(tdev->tx_channel) && IS_ERR_OR_NULL(tdev->rx_channel))
return -EPROBE_DEFER;
/* If Rx is not specified but has Rx MMIO, then Rx = Tx */
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index adf36c05fa43..ebff3baf3045 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
+#include <linux/of.h>
#include "mailbox.h"
diff --git a/drivers/mailbox/mtk-adsp-mailbox.c b/drivers/mailbox/mtk-adsp-mailbox.c
index 14bc0057de81..91487aa4d7da 100644
--- a/drivers/mailbox/mtk-adsp-mailbox.c
+++ b/drivers/mailbox/mtk-adsp-mailbox.c
@@ -10,7 +10,8 @@
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
struct mtk_adsp_mbox_priv {
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index b18d47ea13a0..4d62b07c1411 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index fa2ce3246b70..792bcaebbc9b 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -16,7 +16,7 @@
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/omap-mailbox.h>
diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c
index a5922ac0b0bf..834aecd720ac 100644
--- a/drivers/mailbox/platform_mhu.c
+++ b/drivers/mailbox/platform_mhu.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
@@ -135,10 +136,8 @@ static int platform_mhu_probe(struct platform_device *pdev)
for (i = 0; i < MHU_CHANS; i++) {
mhu->chan[i].con_priv = &mhu->mlink[i];
mhu->mlink[i].irq = platform_get_irq(pdev, i);
- if (mhu->mlink[i].irq < 0) {
- dev_err(dev, "failed to get irq%d\n", i);
+ if (mhu->mlink[i].irq < 0)
return mhu->mlink[i].irq;
- }
mhu->mlink[i].rx_reg = mhu->base + platform_mhu_reg[i];
mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
}
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 7e27acf6c0cc..f597a1bd5684 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -227,10 +227,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
ret = of_parse_phandle_with_args(client_dn, "mboxes",
"#mbox-cells", j, &curr_ph);
of_node_put(curr_ph.np);
- if (!ret && curr_ph.np == controller_dn) {
+ if (!ret && curr_ph.np == controller_dn)
ipcc->num_chans++;
- break;
- }
}
}
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
index 116286ecc5a0..8ffad059e898 100644
--- a/drivers/mailbox/rockchip-mailbox.c
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -8,8 +8,8 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
+#include <linux/of.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#define MAILBOX_A2B_INTEN 0x00
@@ -194,11 +194,7 @@ static int rockchip_mbox_probe(struct platform_device *pdev)
mb->mbox.ops = &rockchip_mbox_chan_ops;
mb->mbox.txdone_irq = true;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- mb->mbox_base = devm_ioremap_resource(&pdev->dev, res);
+ mb->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mb->mbox_base))
return PTR_ERR(mb->mbox_base);
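Several hunks in this series (leds-ip30, bcm-flexrm, bcm-pdc, mailbox-test, rockchip-mailbox, tegra-hsp, ti-msgmgr) replace the two-step platform_get_resource() + devm_ioremap_resource() pair with the combined devm_platform_*() helpers. A kernel-context sketch of the equivalence, using a hypothetical foo_probe() rather than any of the drivers above:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/*
	 * Old pattern removed by the hunks above:
	 *   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   if (!res)
	 *           return -ENODEV;
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "registers at %pR\n", res);

	/*
	 * devm_platform_ioremap_resource(pdev, 0) and
	 * devm_platform_ioremap_resource_byname(pdev, "name") are the same
	 * idea when the struct resource pointer or a named region is wanted.
	 */
	return 0;
}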
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
index e3c899abeed8..9ae57de77d4d 100644
--- a/drivers/mailbox/sprd-mailbox.c
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -11,7 +11,7 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index 15d538fe2113..4ad3653f3866 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 7f98e7436d94..fe29fc2ca526 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -8,7 +8,6 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -728,7 +727,6 @@ static int tegra_hsp_request_shared_irq(struct tegra_hsp *hsp)
static int tegra_hsp_probe(struct platform_device *pdev)
{
struct tegra_hsp *hsp;
- struct resource *res;
unsigned int i;
u32 value;
int err;
@@ -742,8 +740,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&hsp->doorbells);
spin_lock_init(&hsp->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hsp->regs = devm_ioremap_resource(&pdev->dev, res);
+ hsp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsp->regs))
return PTR_ERR(hsp->regs);
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index 03048cbda525..a94577f16a47 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -812,7 +812,6 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct device_node *np;
- struct resource *res;
const struct ti_msgmgr_desc *desc;
struct ti_msgmgr_inst *inst;
struct ti_queue_inst *qinst;
@@ -843,22 +842,19 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
inst->dev = dev;
inst->desc = desc;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- desc->data_region_name);
- inst->queue_proxy_region = devm_ioremap_resource(dev, res);
+ inst->queue_proxy_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->data_region_name);
if (IS_ERR(inst->queue_proxy_region))
return PTR_ERR(inst->queue_proxy_region);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- desc->status_region_name);
- inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
+ inst->queue_state_debug_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->status_region_name);
if (IS_ERR(inst->queue_state_debug_region))
return PTR_ERR(inst->queue_state_debug_region);
if (desc->is_sproxy) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- desc->ctrl_region_name);
- inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
+ inst->queue_ctrl_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->ctrl_region_name);
if (IS_ERR(inst->queue_ctrl_region))
return PTR_ERR(inst->queue_ctrl_region);
}
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index d097f45b0e5f..e4fcac97dbfa 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -16,8 +16,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
/* IPI agent ID any */
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 0d93661f88d3..095b9b49aa82 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -214,6 +214,7 @@ struct dm_table {
/* a list of devices used by this table */
struct list_head devices;
+ struct rw_semaphore devices_lock;
/* events get handed up using this callback */
void (*event_fn)(void *data);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f2662c21a6df..5315fd261c23 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -753,7 +753,8 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
int err;
u8 *buf;
- reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64));
+ reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm);
+ reqsize = ALIGN(reqsize, __alignof__(__le64));
req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
if (!req)
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index f5ed729a8e0c..21ebb6c39394 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1630,6 +1630,8 @@ static void retrieve_deps(struct dm_table *table,
struct dm_dev_internal *dd;
struct dm_target_deps *deps;
+ down_read(&table->devices_lock);
+
deps = get_result_buffer(param, param_size, &len);
/*
@@ -1644,7 +1646,7 @@ static void retrieve_deps(struct dm_table *table,
needed = struct_size(deps, dev, count);
if (len < needed) {
param->flags |= DM_BUFFER_FULL_FLAG;
- return;
+ goto out;
}
/*
@@ -1656,6 +1658,9 @@ static void retrieve_deps(struct dm_table *table,
deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
param->data_size = param->data_start + needed;
+
+out:
+ up_read(&table->devices_lock);
}
static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 7d208b2b1a19..37b48f63ae6a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -135,6 +135,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode,
return -ENOMEM;
INIT_LIST_HEAD(&t->devices);
+ init_rwsem(&t->devices_lock);
if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -359,16 +360,20 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
if (dev == disk_devt(t->md->disk))
return -EINVAL;
+ down_write(&t->devices_lock);
+
dd = find_device(&t->devices, dev);
if (!dd) {
dd = kmalloc(sizeof(*dd), GFP_KERNEL);
- if (!dd)
- return -ENOMEM;
+ if (!dd) {
+ r = -ENOMEM;
+ goto unlock_ret_r;
+ }
r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
if (r) {
kfree(dd);
- return r;
+ goto unlock_ret_r;
}
refcount_set(&dd->count, 1);
@@ -378,12 +383,17 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
- return r;
+ goto unlock_ret_r;
}
refcount_inc(&dd->count);
out:
+ up_write(&t->devices_lock);
*result = dd->dm_dev;
return 0;
+
+unlock_ret_r:
+ up_write(&t->devices_lock);
+ return r;
}
EXPORT_SYMBOL(dm_get_device);
@@ -419,9 +429,12 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
int found = 0;
- struct list_head *devices = &ti->table->devices;
+ struct dm_table *t = ti->table;
+ struct list_head *devices = &t->devices;
struct dm_dev_internal *dd;
+ down_write(&t->devices_lock);
+
list_for_each_entry(dd, devices, list) {
if (dd->dm_dev == d) {
found = 1;
@@ -430,14 +443,17 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
}
if (!found) {
DMERR("%s: device %s not in table devices list",
- dm_device_name(ti->table->md), d->name);
- return;
+ dm_device_name(t->md), d->name);
+ goto unlock_ret;
}
if (refcount_dec_and_test(&dd->count)) {
- dm_put_table_device(ti->table->md, d);
+ dm_put_table_device(t->md, d);
list_del(&dd->list);
kfree(dd);
}
+
+unlock_ret:
+ up_write(&t->devices_lock);
}
EXPORT_SYMBOL(dm_put_device);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index ad8e670a2f9b..b487f7acc860 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -748,17 +748,16 @@ err:
/*
* Cleanup zoned device information.
*/
-static void dmz_put_zoned_device(struct dm_target *ti)
+static void dmz_put_zoned_devices(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
int i;
- for (i = 0; i < dmz->nr_ddevs; i++) {
- if (dmz->ddev[i]) {
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ if (dmz->ddev[i])
dm_put_device(ti, dmz->ddev[i]);
- dmz->ddev[i] = NULL;
- }
- }
+
+ kfree(dmz->ddev);
}
static int dmz_fixup_devices(struct dm_target *ti)
@@ -948,7 +947,7 @@ err_bio:
err_meta:
dmz_dtr_metadata(dmz->metadata);
err_dev:
- dmz_put_zoned_device(ti);
+ dmz_put_zoned_devices(ti);
err:
kfree(dmz->dev);
kfree(dmz);
@@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti)
bioset_exit(&dmz->bio_set);
- dmz_put_zoned_device(ti);
+ dmz_put_zoned_devices(ti);
mutex_destroy(&dmz->chunk_lock);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f0f118ab20fa..64a1f306c96c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -715,24 +715,6 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
rcu_read_unlock();
}
-static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
- int *srcu_idx, blk_opf_t bio_opf)
-{
- if (bio_opf & REQ_NOWAIT)
- return dm_get_live_table_fast(md);
- else
- return dm_get_live_table(md, srcu_idx);
-}
-
-static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
- blk_opf_t bio_opf)
-{
- if (bio_opf & REQ_NOWAIT)
- dm_put_live_table_fast(md);
- else
- dm_put_live_table(md, srcu_idx);
-}
-
static char *_dm_claim_ptr = "I belong to device-mapper";
/*
@@ -1833,9 +1815,8 @@ static void dm_submit_bio(struct bio *bio)
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
int srcu_idx;
struct dm_table *map;
- blk_opf_t bio_opf = bio->bi_opf;
- map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
+ map = dm_get_live_table(md, &srcu_idx);
/* If suspended, or map not yet available, queue this IO for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
@@ -1851,7 +1832,7 @@ static void dm_submit_bio(struct bio *bio)
dm_split_and_process_bio(md, map, bio);
out:
- dm_put_live_table_bio(md, srcu_idx, bio_opf);
+ dm_put_live_table(md, srcu_idx);
}
static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0fe7ab6e8ab9..a104a025084d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -798,14 +798,14 @@ void mddev_unlock(struct mddev *mddev)
} else
mutex_unlock(&mddev->reconfig_mutex);
+ md_wakeup_thread(mddev->thread);
+ wake_up(&mddev->sb_wait);
+
list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
list_del_init(&rdev->same_set);
kobject_del(&rdev->kobj);
export_rdev(rdev, mddev);
}
-
- md_wakeup_thread(mddev->thread);
- wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -2452,7 +2452,8 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
- blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev);
+ blkdev_put(rdev->bdev,
+ test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
rdev->bdev = NULL;
kobject_put(&rdev->kobj);
}
@@ -3632,6 +3633,7 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
struct md_rdev *rdev;
+ struct md_rdev *holder;
sector_t size;
int err;
@@ -3646,8 +3648,15 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
if (err)
goto out_clear_rdev;
+ if (super_format == -2) {
+ holder = &claim_rdev;
+ } else {
+ holder = rdev;
+ set_bit(Holder, &rdev->flags);
+ }
+
rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- super_format == -2 ? &claim_rdev : rdev, NULL);
+ holder, NULL);
if (IS_ERR(rdev->bdev)) {
pr_warn("md: could not open device unknown-block(%u,%u).\n",
MAJOR(newdev), MINOR(newdev));
@@ -3684,7 +3693,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
return rdev;
out_blkdev_put:
- blkdev_put(rdev->bdev, super_format == -2 ? &claim_rdev : rdev);
+ blkdev_put(rdev->bdev, holder);
out_clear_rdev:
md_rdev_clear(rdev);
out_free_rdev:
@@ -8256,7 +8265,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
spin_unlock(&all_mddevs_lock);
if (to_put)
- mddev_put(mddev);
+ mddev_put(to_put);
return next_mddev;
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 9bcb77bca963..7c9c13abd7ca 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -211,6 +211,9 @@ enum flag_bits {
* check if there is collision between raid1
* serial bios.
*/
+ Holder, /* rdev is used as holder while opening
+ * underlying disk exclusively.
+ */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4b30a1742162..2aabac773fe7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1837,12 +1837,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
struct r1conf *conf = mddev->private;
int err = 0;
int number = rdev->raid_disk;
+ struct raid1_info *p = conf->mirrors + number;
if (unlikely(number >= conf->raid_disks))
goto abort;
- struct raid1_info *p = conf->mirrors + number;
-
if (rdev != p->rdev)
p = conf->mirrors + conf->raid_disks + number;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4cb9c608ee19..284cd71bcc68 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
r5l_wake_reclaim(conf->log, 0);
+
+ /* release batch_last before wait to avoid risk of deadlock */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
+
wait_event_lock_irq(conf->wait_for_stripe,
is_inactive_blocked(conf, hash),
*(conf->hash_locks + hash));
diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c
index 0f430ddc1f67..fd87747be9b1 100644
--- a/drivers/media/common/videobuf2/frame_vector.c
+++ b/drivers/media/common/videobuf2/frame_vector.c
@@ -31,6 +31,10 @@
* different type underlying the specified range of virtual addresses.
* When the function isn't able to map a single page, it returns error.
*
+ * Note that get_vaddr_frames() cannot follow VM_IO mappings. It used
+ * to be able to do that, but that could (racily) return non-refcounted
+ * pfns.
+ *
* This function takes care of grabbing mmap_lock as necessary.
*/
int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write,
@@ -59,8 +63,6 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write,
if (likely(ret > 0))
return ret;
- /* This used to (racily) return non-refcounted pfns. Let people know */
- WARN_ONCE(1, "get_vaddr_frames() cannot follow VM_IO mapping");
vec->nr_frames = 0;
return ret ? ret : -EFAULT;
}
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index 9b00b56230b6..cf8e5f1bd101 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -533,7 +533,7 @@ struct dvb_frontend *ascot2e_attach(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(ascot2e_attach);
+EXPORT_SYMBOL_GPL(ascot2e_attach);
MODULE_DESCRIPTION("Sony ASCOT2E terr/cab tuner driver");
MODULE_AUTHOR("info@netup.ru");
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index bdd16b9c5824..778c865085bf 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -489,7 +489,7 @@ error_out:
return NULL;
}
-EXPORT_SYMBOL(atbm8830_attach);
+EXPORT_SYMBOL_GPL(atbm8830_attach);
MODULE_DESCRIPTION("AltoBeam ATBM8830/8831 GB20600 demodulator driver");
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index 78cafdf27961..230436bf6cbd 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -879,7 +879,7 @@ error:
au8522_release_state(state);
return NULL;
}
-EXPORT_SYMBOL(au8522_attach);
+EXPORT_SYMBOL_GPL(au8522_attach);
static const struct dvb_frontend_ops au8522_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index 68b92b4419cf..b3f5c49accaf 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -835,7 +835,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(bcm3510_attach);
+EXPORT_SYMBOL_GPL(bcm3510_attach);
static const struct dvb_frontend_ops bcm3510_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index b39ff516271b..1d04c0a652b2 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -432,4 +432,4 @@ MODULE_DESCRIPTION("Conexant CX22700 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(cx22700_attach);
+EXPORT_SYMBOL_GPL(cx22700_attach);
diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c
index cc6acbf6393d..61ad34b7004b 100644
--- a/drivers/media/dvb-frontends/cx22702.c
+++ b/drivers/media/dvb-frontends/cx22702.c
@@ -604,7 +604,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(cx22702_attach);
+EXPORT_SYMBOL_GPL(cx22702_attach);
static const struct dvb_frontend_ops cx22702_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 6f99d6a27be2..9aeea089756f 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -653,4 +653,4 @@ MODULE_DESCRIPTION("Conexant CX24110 DVB-S Demodulator driver");
MODULE_AUTHOR("Peter Hettkamp");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(cx24110_attach);
+EXPORT_SYMBOL_GPL(cx24110_attach);
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index dd55d314bf9a..203cb6b3f941 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -590,7 +590,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(cx24113_attach);
+EXPORT_SYMBOL_GPL(cx24113_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index ea8264ccbb4e..8b978a9f74a4 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -1133,7 +1133,7 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
state->frontend.demodulator_priv = state;
return &state->frontend;
}
-EXPORT_SYMBOL(cx24116_attach);
+EXPORT_SYMBOL_GPL(cx24116_attach);
/*
* Initialise or wake up device
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 0f778660c72b..44515fdbe91d 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -305,7 +305,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(cx24120_attach);
+EXPORT_SYMBOL_GPL(cx24120_attach);
static int cx24120_test_rom(struct cx24120_state *state)
{
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 3d84ee17e54c..539889e638cc 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1096,7 +1096,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(cx24123_attach);
+EXPORT_SYMBOL_GPL(cx24123_attach);
static const struct dvb_frontend_ops cx24123_ops = {
.delsys = { SYS_DVBS },
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index d7ee294c6833..7feb08dccfa1 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -536,7 +536,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config,
return pdata.get_dvb_frontend(client);
}
-EXPORT_SYMBOL(cxd2820r_attach);
+EXPORT_SYMBOL_GPL(cxd2820r_attach);
static struct dvb_frontend *cxd2820r_get_dvb_frontend(struct i2c_client *client)
{
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index ef403a9fb753..d925ca24183b 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3930,14 +3930,14 @@ struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
{
return cxd2841er_attach(cfg, i2c, SYS_DVBS);
}
-EXPORT_SYMBOL(cxd2841er_attach_s);
+EXPORT_SYMBOL_GPL(cxd2841er_attach_s);
struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
struct i2c_adapter *i2c)
{
return cxd2841er_attach(cfg, i2c, 0);
}
-EXPORT_SYMBOL(cxd2841er_attach_t_c);
+EXPORT_SYMBOL_GPL(cxd2841er_attach_t_c);
static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
diff --git a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
index f67b6d24b8d4..a06d8368ca79 100644
--- a/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
+++ b/drivers/media/dvb-frontends/cxd2880/cxd2880_top.c
@@ -1950,7 +1950,7 @@ struct dvb_frontend *cxd2880_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(cxd2880_attach);
+EXPORT_SYMBOL_GPL(cxd2880_attach);
MODULE_DESCRIPTION("Sony CXD2880 DVB-T2/T tuner + demod driver");
MODULE_AUTHOR("Sony Semiconductor Solutions Corporation");
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index cafb41dba861..9a8e7cdd2a24 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -762,7 +762,7 @@ free_mem:
fe->tuner_priv = NULL;
return NULL;
}
-EXPORT_SYMBOL(dib0070_attach);
+EXPORT_SYMBOL_GPL(dib0070_attach);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner");
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 903da33642df..c958bcff026e 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -2634,7 +2634,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
return NULL;
}
-EXPORT_SYMBOL(dib0090_register);
+EXPORT_SYMBOL_GPL(dib0090_register);
struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
{
@@ -2660,7 +2660,7 @@ free_mem:
fe->tuner_priv = NULL;
return NULL;
}
-EXPORT_SYMBOL(dib0090_fw_register);
+EXPORT_SYMBOL_GPL(dib0090_fw_register);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index a6c2fc4586eb..c598b2a63325 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -815,4 +815,4 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(dib3000mb_attach);
+EXPORT_SYMBOL_GPL(dib3000mb_attach);
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 2e11a246aae0..c2fca8289aba 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -935,7 +935,7 @@ error:
kfree(st);
return NULL;
}
-EXPORT_SYMBOL(dib3000mc_attach);
+EXPORT_SYMBOL_GPL(dib3000mc_attach);
static const struct dvb_frontend_ops dib3000mc_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index 97ce97789c9e..fdb22f32e3a1 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -1434,7 +1434,7 @@ error:
kfree(st);
return NULL;
}
-EXPORT_SYMBOL(dib7000m_attach);
+EXPORT_SYMBOL_GPL(dib7000m_attach);
static const struct dvb_frontend_ops dib7000m_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 9273758bf140..444fe1c4bf2d 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2822,7 +2822,7 @@ void *dib7000p_attach(struct dib7000p_ops *ops)
return ops;
}
-EXPORT_SYMBOL(dib7000p_attach);
+EXPORT_SYMBOL_GPL(dib7000p_attach);
static const struct dvb_frontend_ops dib7000p_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 2abda7d1cb6e..2f5165918163 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -4527,7 +4527,7 @@ void *dib8000_attach(struct dib8000_ops *ops)
return ops;
}
-EXPORT_SYMBOL(dib8000_attach);
+EXPORT_SYMBOL_GPL(dib8000_attach);
MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@parrot.com, Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator");
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 1c57587a917a..83cf6eadd49c 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -2546,7 +2546,7 @@ error:
kfree(st);
return NULL;
}
-EXPORT_SYMBOL(dib9000_attach);
+EXPORT_SYMBOL_GPL(dib9000_attach);
static const struct dvb_frontend_ops dib9000_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 68f4e8b5a0ab..a738573c8cd7 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -12372,7 +12372,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(drx39xxj_attach);
+EXPORT_SYMBOL_GPL(drx39xxj_attach);
static const struct dvb_frontend_ops drx39xxj_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 9860cae65f1c..6a531937f4bb 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2939,7 +2939,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(drxd_attach);
+EXPORT_SYMBOL_GPL(drxd_attach);
MODULE_DESCRIPTION("DRXD driver");
MODULE_AUTHOR("Micronas");
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 2770baebbbbc..87f3d4f0eb8c 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6814,7 +6814,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(drxk_attach);
+EXPORT_SYMBOL_GPL(drxk_attach);
MODULE_DESCRIPTION("DRX-K driver");
MODULE_AUTHOR("Ralph Metzler");
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 20fcf31af165..515aa7c7baf2 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -859,7 +859,7 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
return &state->frontend;
}
-EXPORT_SYMBOL(ds3000_attach);
+EXPORT_SYMBOL_GPL(ds3000_attach);
static int ds3000_set_carrier_offset(struct dvb_frontend *fe,
s32 carrier_offset_khz)
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 90cb41eacf98..ef697ab6bc2e 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -866,7 +866,7 @@ out:
return NULL;
}
-EXPORT_SYMBOL(dvb_pll_attach);
+EXPORT_SYMBOL_GPL(dvb_pll_attach);
static int
diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c
index 03bd80666cf8..2ad0a3c2f756 100644
--- a/drivers/media/dvb-frontends/ec100.c
+++ b/drivers/media/dvb-frontends/ec100.c
@@ -299,7 +299,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(ec100_attach);
+EXPORT_SYMBOL_GPL(ec100_attach);
static const struct dvb_frontend_ops ec100_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index 68c1a3e0e2ba..f127adee3ebb 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -1025,7 +1025,7 @@ struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(helene_attach_s);
+EXPORT_SYMBOL_GPL(helene_attach_s);
struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
const struct helene_config *config,
@@ -1061,7 +1061,7 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(helene_attach);
+EXPORT_SYMBOL_GPL(helene_attach);
static int helene_probe(struct i2c_client *client)
{
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 24bf5cbcc184..0330b78a5b3f 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -395,7 +395,7 @@ struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe,
priv->i2c_address, priv->i2c);
return fe;
}
-EXPORT_SYMBOL(horus3a_attach);
+EXPORT_SYMBOL_GPL(horus3a_attach);
MODULE_DESCRIPTION("Sony HORUS3A satellite tuner driver");
MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
diff --git a/drivers/media/dvb-frontends/isl6405.c b/drivers/media/dvb-frontends/isl6405.c
index 2cd69b4ff82c..7d28a743f97e 100644
--- a/drivers/media/dvb-frontends/isl6405.c
+++ b/drivers/media/dvb-frontends/isl6405.c
@@ -141,7 +141,7 @@ struct dvb_frontend *isl6405_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(isl6405_attach);
+EXPORT_SYMBOL_GPL(isl6405_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6405");
MODULE_AUTHOR("Hartmut Hackmann & Oliver Endriss");
diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c
index 43b0dfc6f453..2e9f6f12f849 100644
--- a/drivers/media/dvb-frontends/isl6421.c
+++ b/drivers/media/dvb-frontends/isl6421.c
@@ -213,7 +213,7 @@ struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(isl6421_attach);
+EXPORT_SYMBOL_GPL(isl6421_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6421");
MODULE_AUTHOR("Andrew de Quincey & Oliver Endriss");
diff --git a/drivers/media/dvb-frontends/isl6423.c b/drivers/media/dvb-frontends/isl6423.c
index 8cd1bb88ce6e..a0d0a3834057 100644
--- a/drivers/media/dvb-frontends/isl6423.c
+++ b/drivers/media/dvb-frontends/isl6423.c
@@ -289,7 +289,7 @@ exit:
fe->sec_priv = NULL;
return NULL;
}
-EXPORT_SYMBOL(isl6423_attach);
+EXPORT_SYMBOL_GPL(isl6423_attach);
MODULE_DESCRIPTION("ISL6423 SEC");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index 1b33478653d1..f8f362f50e78 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -389,7 +389,7 @@ struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(itd1000_attach);
+EXPORT_SYMBOL_GPL(itd1000_attach);
MODULE_AUTHOR("Patrick Boettcher <pb@linuxtv.org>");
MODULE_DESCRIPTION("Integrant ITD1000 driver");
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 73f27105c139..3212e333d472 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -302,7 +302,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(ix2505v_attach);
+EXPORT_SYMBOL_GPL(ix2505v_attach);
module_param_named(debug, ix2505v_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index c5106a1ea1cd..fe5af2453d55 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("LSI L64781 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler, Marko Kohtala");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(l64781_attach);
+EXPORT_SYMBOL_GPL(l64781_attach);
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index f343066c297e..fe700aa56bff 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1426,7 +1426,7 @@ struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
return &state->frontend;
}
-EXPORT_SYMBOL(lg2160_attach);
+EXPORT_SYMBOL_GPL(lg2160_attach);
MODULE_DESCRIPTION("LG Electronics LG216x ATSC/MH Demodulator Driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index c15d3735d34c..bdc8311e1c0b 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1148,7 +1148,7 @@ fail:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(lgdt3305_attach);
+EXPORT_SYMBOL_GPL(lgdt3305_attach);
static const struct dvb_frontend_ops lgdt3304_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 3c6650f6e9a3..263887592415 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -1859,7 +1859,7 @@ fail:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(lgdt3306a_attach);
+EXPORT_SYMBOL_GPL(lgdt3306a_attach);
#ifdef DBG_DUMP
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 97a10996c7fa..081d6ad3ce72 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -927,7 +927,7 @@ struct dvb_frontend *lgdt330x_attach(const struct lgdt330x_config *_config,
return lgdt330x_get_dvb_frontend(client);
}
-EXPORT_SYMBOL(lgdt330x_attach);
+EXPORT_SYMBOL_GPL(lgdt330x_attach);
static const struct dvb_frontend_ops lgdt3302_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index 30014979b985..ffaf60e16ecd 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -1043,7 +1043,7 @@ error_out:
return NULL;
}
-EXPORT_SYMBOL(lgs8gxx_attach);
+EXPORT_SYMBOL_GPL(lgs8gxx_attach);
MODULE_DESCRIPTION("Legend Silicon LGS8913/LGS8GXX DMB-TH demodulator driver");
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
index 9ffe06cd787d..41bec050642b 100644
--- a/drivers/media/dvb-frontends/lnbh25.c
+++ b/drivers/media/dvb-frontends/lnbh25.c
@@ -173,7 +173,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
__func__, priv->i2c_address);
return fe;
}
-EXPORT_SYMBOL(lnbh25_attach);
+EXPORT_SYMBOL_GPL(lnbh25_attach);
MODULE_DESCRIPTION("ST LNBH25 driver");
MODULE_AUTHOR("info@netup.ru");
diff --git a/drivers/media/dvb-frontends/lnbp21.c b/drivers/media/dvb-frontends/lnbp21.c
index e564974162d6..32593b1f75a3 100644
--- a/drivers/media/dvb-frontends/lnbp21.c
+++ b/drivers/media/dvb-frontends/lnbp21.c
@@ -155,7 +155,7 @@ struct dvb_frontend *lnbh24_attach(struct dvb_frontend *fe,
return lnbx2x_attach(fe, i2c, override_set, override_clear,
i2c_addr, LNBH24_TTX);
}
-EXPORT_SYMBOL(lnbh24_attach);
+EXPORT_SYMBOL_GPL(lnbh24_attach);
struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, u8 override_set,
@@ -164,7 +164,7 @@ struct dvb_frontend *lnbp21_attach(struct dvb_frontend *fe,
return lnbx2x_attach(fe, i2c, override_set, override_clear,
0x08, LNBP21_ISEL);
}
-EXPORT_SYMBOL(lnbp21_attach);
+EXPORT_SYMBOL_GPL(lnbp21_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp21, lnbh24");
MODULE_AUTHOR("Oliver Endriss, Igor M. Liplianin");
diff --git a/drivers/media/dvb-frontends/lnbp22.c b/drivers/media/dvb-frontends/lnbp22.c
index b8c7145d4cef..cb4ea5d3fad4 100644
--- a/drivers/media/dvb-frontends/lnbp22.c
+++ b/drivers/media/dvb-frontends/lnbp22.c
@@ -125,7 +125,7 @@ struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(lnbp22_attach);
+EXPORT_SYMBOL_GPL(lnbp22_attach);
MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp22");
MODULE_AUTHOR("Dominik Kuhlen");
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index cf49ac56a37e..cf037b61b226 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1695,7 +1695,7 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
*tuner_i2c_adapter = pdata.get_i2c_adapter(client);
return pdata.get_dvb_frontend(client);
}
-EXPORT_SYMBOL(m88ds3103_attach);
+EXPORT_SYMBOL_GPL(m88ds3103_attach);
static const struct dvb_frontend_ops m88ds3103_ops = {
.delsys = {SYS_DVBS, SYS_DVBS2},
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index b294ba87e934..2aa98203cd65 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -808,7 +808,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(m88rs2000_attach);
+EXPORT_SYMBOL_GPL(m88rs2000_attach);
MODULE_DESCRIPTION("M88RS2000 DVB-S Demodulator driver");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 3ec2cb4fa504..0fc45896e7b8 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1853,6 +1853,6 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(mb86a16_attach);
+EXPORT_SYMBOL_GPL(mb86a16_attach);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 125fed4891ba..f8e4bbee5bd5 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2078,7 +2078,7 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
dev_info(&i2c->dev, "Detected a Fujitsu mb86a20s frontend\n");
return &state->frontend;
}
-EXPORT_SYMBOL(mb86a20s_attach);
+EXPORT_SYMBOL_GPL(mb86a20s_attach);
static const struct dvb_frontend_ops mb86a20s_ops = {
.delsys = { SYS_ISDBT },
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index d43a67045dbe..fb867dd8a26b 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -827,7 +827,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(mt312_attach);
+EXPORT_SYMBOL_GPL(mt312_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index 399d5c519027..1b2889f5cf67 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -593,4 +593,4 @@ MODULE_DESCRIPTION("Zarlink MT352 DVB-T Demodulator driver");
MODULE_AUTHOR("Holger Waechtler, Daniel Mack, Antonio Mancuso");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(mt352_attach);
+EXPORT_SYMBOL_GPL(mt352_attach);
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 200b6dbc75f8..1c549ada6ebf 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -1216,5 +1216,5 @@ MODULE_DESCRIPTION("NXT200X (ATSC 8VSB & ITU-T J.83 AnnexB 64/256 QAM) Demodulat
MODULE_AUTHOR("Kirk Lapray, Michael Krufky, Jean-Francois Thibert, and Taylor Jacob");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(nxt200x_attach);
+EXPORT_SYMBOL_GPL(nxt200x_attach);
diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
index 136918f82dda..e8d4940370dd 100644
--- a/drivers/media/dvb-frontends/nxt6000.c
+++ b/drivers/media/dvb-frontends/nxt6000.c
@@ -621,4 +621,4 @@ MODULE_DESCRIPTION("NxtWave NXT6000 DVB-T demodulator driver");
MODULE_AUTHOR("Florian Schirmer");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(nxt6000_attach);
+EXPORT_SYMBOL_GPL(nxt6000_attach);
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index 355f3598627b..74e04c7cca1e 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -605,4 +605,4 @@ MODULE_AUTHOR("Kirk Lapray");
MODULE_AUTHOR("Trent Piepho");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(or51132_attach);
+EXPORT_SYMBOL_GPL(or51132_attach);
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index ae732dc5116e..2e8e7071a67a 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -551,5 +551,5 @@ MODULE_DESCRIPTION("Oren OR51211 VSB [pcHDTV HD-2000] Demodulator Driver");
MODULE_AUTHOR("Kirk Lapray");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(or51211_attach);
+EXPORT_SYMBOL_GPL(or51211_attach);
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index 3089cc174a6f..28b1dca077ea 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -981,7 +981,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(s5h1409_attach);
+EXPORT_SYMBOL_GPL(s5h1409_attach);
static const struct dvb_frontend_ops s5h1409_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index 2563a72e98b7..fc48e659c2d8 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -900,7 +900,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(s5h1411_attach);
+EXPORT_SYMBOL_GPL(s5h1411_attach);
static const struct dvb_frontend_ops s5h1411_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index 6bdec2898bc8..d700de1ea6c2 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -918,7 +918,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(s5h1420_attach);
+EXPORT_SYMBOL_GPL(s5h1420_attach);
static const struct dvb_frontend_ops s5h1420_ops = {
.delsys = { SYS_DVBS },
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index 956e8ee4b388..ff5d3bdf3bc6 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -355,7 +355,7 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
return &state->frontend;
}
-EXPORT_SYMBOL(s5h1432_attach);
+EXPORT_SYMBOL_GPL(s5h1432_attach);
static const struct dvb_frontend_ops s5h1432_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index f118d8e64103..7e461ac159fc 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -495,7 +495,7 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
return &state->frontend;
}
-EXPORT_SYMBOL(s921_attach);
+EXPORT_SYMBOL_GPL(s921_attach);
static const struct dvb_frontend_ops s921_ops = {
.delsys = { SYS_ISDBT },
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index 2d29d2c4d434..210ccd356e2b 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -937,7 +937,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(si21xx_attach);
+EXPORT_SYMBOL_GPL(si21xx_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index 146e7f2dd3c5..f59c0f96416b 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -624,4 +624,4 @@ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Spase sp887x DVB-T demodulator driver");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(sp887x_attach);
+EXPORT_SYMBOL_GPL(sp887x_attach);
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 4ee6c1e1e9f7..2f4d8fb400cd 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1638,7 +1638,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stb0899_attach);
+EXPORT_SYMBOL_GPL(stb0899_attach);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("STB0899 Multi-Std frontend");
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index 8c9800d577e0..d74e34677b92 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -232,7 +232,7 @@ struct dvb_frontend *stb6000_attach(struct dvb_frontend *fe, int addr,
return fe;
}
-EXPORT_SYMBOL(stb6000_attach);
+EXPORT_SYMBOL_GPL(stb6000_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 698866c4f15a..c5818a15a0d7 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -557,7 +557,7 @@ static void stb6100_release(struct dvb_frontend *fe)
kfree(state);
}
-EXPORT_SYMBOL(stb6100_attach);
+EXPORT_SYMBOL_GPL(stb6100_attach);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index 3ae1f3a2f142..a5581bd60f9e 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -590,7 +590,7 @@ error:
return NULL;
}
-EXPORT_SYMBOL(stv0288_attach);
+EXPORT_SYMBOL_GPL(stv0288_attach);
module_param(debug_legacy_dish_switch, int, 0444);
MODULE_PARM_DESC(debug_legacy_dish_switch,
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index 6d5962d5697a..9d4dbd99a5a7 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -710,4 +710,4 @@ MODULE_DESCRIPTION("ST STV0297 DVB-C Demodulator driver");
MODULE_AUTHOR("Dennis Noermann and Andrew de Quincey");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(stv0297_attach);
+EXPORT_SYMBOL_GPL(stv0297_attach);
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index b5263a0ee5aa..da7ff2c2e8e5 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -752,4 +752,4 @@ MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(stv0299_attach);
+EXPORT_SYMBOL_GPL(stv0299_attach);
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index a93f40617469..48326434488c 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -1750,7 +1750,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0367ter_attach);
+EXPORT_SYMBOL_GPL(stv0367ter_attach);
static int stv0367cab_gate_ctrl(struct dvb_frontend *fe, int enable)
{
@@ -2919,7 +2919,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0367cab_attach);
+EXPORT_SYMBOL_GPL(stv0367cab_attach);
/*
* Functions for operation on Digital Devices hardware
@@ -3340,7 +3340,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0367ddb_attach);
+EXPORT_SYMBOL_GPL(stv0367ddb_attach);
MODULE_PARM_DESC(debug, "Set debug");
MODULE_PARM_DESC(i2c_debug, "Set i2c debug");
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 212312d20ff6..e7b9b9b11d7d 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1957,7 +1957,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv0900_attach);
+EXPORT_SYMBOL_GPL(stv0900_attach);
MODULE_PARM_DESC(debug, "Set debug");
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index a07dc5fdeb3d..cc45139057ba 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -5071,7 +5071,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(stv090x_attach);
+EXPORT_SYMBOL_GPL(stv090x_attach);
static const struct i2c_device_id stv090x_id_table[] = {
{"stv090x", 0},
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 963f6a896102..1cf9c095dbff 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -427,7 +427,7 @@ struct dvb_frontend *stv6110_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(stv6110_attach);
+EXPORT_SYMBOL_GPL(stv6110_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index 11653f846c12..c678f47d2449 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -467,7 +467,7 @@ const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
dev_info(&stv6110x->i2c->dev, "Attaching STV6110x\n");
return stv6110x->devctl;
}
-EXPORT_SYMBOL(stv6110x_attach);
+EXPORT_SYMBOL_GPL(stv6110x_attach);
static const struct i2c_device_id stv6110x_id_table[] = {
{"stv6110x", 0},
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index faa6e54b3372..462e12ab6bd1 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -523,4 +523,4 @@ MODULE_DESCRIPTION("Philips TDA10021 DVB-C demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Markus Schulz");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10021_attach);
+EXPORT_SYMBOL_GPL(tda10021_attach);
diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
index 8f32edf6b700..4c2541ecd743 100644
--- a/drivers/media/dvb-frontends/tda10023.c
+++ b/drivers/media/dvb-frontends/tda10023.c
@@ -594,4 +594,4 @@ MODULE_DESCRIPTION("Philips TDA10023 DVB-C demodulator driver");
MODULE_AUTHOR("Georg Acher, Hartmut Birr");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10023_attach);
+EXPORT_SYMBOL_GPL(tda10023_attach);
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index 3cb4e5270e4f..5d5e4e9e4422 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1138,7 +1138,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(tda10048_attach);
+EXPORT_SYMBOL_GPL(tda10048_attach);
static const struct dvb_frontend_ops tda10048_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index 83a798ca9b00..6f306db6c615 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -1378,5 +1378,5 @@ MODULE_DESCRIPTION("Philips TDA10045H & TDA10046H DVB-T Demodulator");
MODULE_AUTHOR("Andrew de Quincey & Robert Schlabbach");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10045_attach);
-EXPORT_SYMBOL(tda10046_attach);
+EXPORT_SYMBOL_GPL(tda10045_attach);
+EXPORT_SYMBOL_GPL(tda10046_attach);
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index cdcf97664bba..b449514ae585 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -764,4 +764,4 @@ MODULE_DESCRIPTION("Philips TDA10086 DVB-S Demodulator");
MODULE_AUTHOR("Andrew de Quincey");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda10086_attach);
+EXPORT_SYMBOL_GPL(tda10086_attach);
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 13e8969da7f8..346be5011fb7 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -227,7 +227,7 @@ struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(tda665x_attach);
+EXPORT_SYMBOL_GPL(tda665x_attach);
MODULE_DESCRIPTION("TDA665x driver");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index e3e1c3db2c85..44f53624557b 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -481,4 +481,4 @@ MODULE_DESCRIPTION("Philips TDA8083 DVB-S Demodulator");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(tda8083_attach);
+EXPORT_SYMBOL_GPL(tda8083_attach);
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index 0d576d41c67d..8b06f92745dc 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -188,7 +188,7 @@ exit:
return NULL;
}
-EXPORT_SYMBOL(tda8261_attach);
+EXPORT_SYMBOL_GPL(tda8261_attach);
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index f9703a1dd758..eafcf5f7da3d 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -164,7 +164,7 @@ struct dvb_frontend *tda826x_attach(struct dvb_frontend *fe, int addr, struct i2
return fe;
}
-EXPORT_SYMBOL(tda826x_attach);
+EXPORT_SYMBOL_GPL(tda826x_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index f5b60f827697..a5ebce57f35e 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -525,7 +525,7 @@ struct dvb_frontend *ts2020_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(ts2020_attach);
+EXPORT_SYMBOL_GPL(ts2020_attach);
/*
* We implement own regmap locking due to legacy DVB attach which uses frontend
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 2483f614d0e7..41dd9b6d3190 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -186,7 +186,7 @@ struct dvb_frontend *tua6100_attach(struct dvb_frontend *fe, int addr, struct i2
fe->tuner_priv = priv;
return fe;
}
-EXPORT_SYMBOL(tua6100_attach);
+EXPORT_SYMBOL_GPL(tua6100_attach);
MODULE_DESCRIPTION("DVB tua6100 driver");
MODULE_AUTHOR("Andrew de Quincey");
diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
index 9df14d0be1c1..ee5620e731e9 100644
--- a/drivers/media/dvb-frontends/ves1820.c
+++ b/drivers/media/dvb-frontends/ves1820.c
@@ -434,4 +434,4 @@ MODULE_DESCRIPTION("VLSI VES1820 DVB-C Demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(ves1820_attach);
+EXPORT_SYMBOL_GPL(ves1820_attach);
diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
index b74727286302..c60e21d26b88 100644
--- a/drivers/media/dvb-frontends/ves1x93.c
+++ b/drivers/media/dvb-frontends/ves1x93.c
@@ -540,4 +540,4 @@ MODULE_DESCRIPTION("VLSI VES1x93 DVB-S Demodulator driver");
MODULE_AUTHOR("Ralph Metzler");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(ves1x93_attach);
+EXPORT_SYMBOL_GPL(ves1x93_attach);
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index d392c7cce2ce..7ba575e9c55f 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -496,7 +496,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(zl10036_attach);
+EXPORT_SYMBOL_GPL(zl10036_attach);
module_param_named(debug, zl10036_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index 1335bf78d5b7..a3e4d219400c 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -295,7 +295,7 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(zl10039_attach);
+EXPORT_SYMBOL_GPL(zl10039_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 2a2cf20a73d6..8849d05475c2 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -665,4 +665,4 @@ MODULE_DESCRIPTION("Zarlink ZL10353 DVB-T demodulator driver");
MODULE_AUTHOR("Chris Pascoe");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(zl10353_attach);
+EXPORT_SYMBOL_GPL(zl10353_attach);
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index a1136fdfbed2..ec53abe2e84e 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -691,12 +691,12 @@ static int imx219_init_cfg(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- /* Initialize try_fmt */
+ /* Initialize the format. */
format = v4l2_subdev_get_pad_format(sd, state, 0);
imx219_update_pad_format(imx219, &supported_modes[0], format,
MEDIA_BUS_FMT_SRGGB10_1X10);
- /* Initialize crop rectangle. */
+ /* Initialize the crop rectangle. */
crop = v4l2_subdev_get_pad_crop(sd, state, 0);
crop->top = IMX219_PIXEL_ARRAY_TOP;
crop->left = IMX219_PIXEL_ARRAY_LEFT;
@@ -750,6 +750,7 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
const struct imx219_mode *mode;
int exposure_max, exposure_def, hblank;
struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
mode = v4l2_find_nearest_size(supported_modes,
ARRAY_SIZE(supported_modes),
@@ -757,10 +758,12 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
+
format = v4l2_subdev_get_pad_format(sd, sd_state, 0);
+ crop = v4l2_subdev_get_pad_crop(sd, sd_state, 0);
- if (imx219->mode == mode && format->code == fmt->format.code)
- return 0;
+ *format = fmt->format;
+ *crop = mode->crop;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
imx219->mode = mode;
@@ -788,8 +791,6 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
hblank);
}
- *format = fmt->format;
-
return 0;
}
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 20e7c7cf5eeb..be84ff1e2b17 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -1110,7 +1110,6 @@ err_async:
static void max9286_v4l2_unregister(struct max9286_priv *priv)
{
- fwnode_handle_put(priv->sd.fwnode);
v4l2_ctrl_handler_free(&priv->ctrls);
v4l2_async_unregister_subdev(&priv->sd);
max9286_v4l2_notifier_unregister(priv);
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
index a36a709243fd..3e22df36354f 100644
--- a/drivers/media/i2c/rdacm21.c
+++ b/drivers/media/i2c/rdacm21.c
@@ -608,7 +608,6 @@ static void rdacm21_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&dev->sd);
v4l2_ctrl_handler_free(&dev->ctrls);
i2c_unregister_device(dev->isp);
- fwnode_handle_put(dev->sd.fwnode);
}
static const struct of_device_id rdacm21_of_ids[] = {
diff --git a/drivers/media/pci/bt8xx/bttv-risc.c b/drivers/media/pci/bt8xx/bttv-risc.c
index 436baf6c8b08..241a696e374a 100644
--- a/drivers/media/pci/bt8xx/bttv-risc.c
+++ b/drivers/media/pci/bt8xx/bttv-risc.c
@@ -68,9 +68,7 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
sg = sglist;
for (line = 0; line < store_lines; line++) {
if ((line >= (store_lines - VCR_HACK_LINES)) &&
- (btv->opt_vcr_hack ||
- (V4L2_FIELD_HAS_BOTH(btv->field) ||
- btv->field == V4L2_FIELD_ALTERNATE)))
+ btv->opt_vcr_hack)
continue;
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 3e52a51982d7..110651e47831 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1722,7 +1722,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
return state; /* Manu (DST is a card not a frontend) */
}
-EXPORT_SYMBOL(dst_attach);
+EXPORT_SYMBOL_GPL(dst_attach);
static const struct dvb_frontend_ops dst_dvbt_ops = {
.delsys = { SYS_DVBT },
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index d234a0f404d6..a9cc6e7a57f9 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -668,7 +668,7 @@ struct dvb_device *dst_ca_attach(struct dst_state *dst, struct dvb_adapter *dvb_
return NULL;
}
-EXPORT_SYMBOL(dst_ca_attach);
+EXPORT_SYMBOL_GPL(dst_ca_attach);
MODULE_DESCRIPTION("DST DVB-S/T/C Combo CA driver");
MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
index 6868a0c4fc82..520ebd16b0c4 100644
--- a/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
+++ b/drivers/media/pci/ddbridge/ddbridge-dummy-fe.c
@@ -112,7 +112,7 @@ struct dvb_frontend *ddbridge_dummy_fe_qam_attach(void)
state->frontend.demodulator_priv = state;
return &state->frontend;
}
-EXPORT_SYMBOL(ddbridge_dummy_fe_qam_attach);
+EXPORT_SYMBOL_GPL(ddbridge_dummy_fe_qam_attach);
static const struct dvb_frontend_ops ddbridge_dummy_fe_qam_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
index e113902fa806..ee4684159d3d 100644
--- a/drivers/media/pci/intel/Kconfig
+++ b/drivers/media/pci/intel/Kconfig
@@ -1,11 +1,19 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/ivsc/Kconfig"
+
config IPU_BRIDGE
- tristate
+ tristate "Intel IPU Bridge"
depends on I2C && ACPI
help
- This is a helper module for the IPU bridge, which can be
- used by ipu3 and other drivers. In order to handle module
- dependencies, this is selected by each driver that needs it.
+ The IPU bridge is a helper library for Intel IPU drivers to
+ function on systems shipped with Windows.
-source "drivers/media/pci/intel/ipu3/Kconfig"
-source "drivers/media/pci/intel/ivsc/Kconfig"
+ Currently used by the ipu3-cio2 and atomisp drivers.
+
+ Supported systems include:
+
+ - Microsoft Surface models (except Surface Pro 3)
+ - The Lenovo Miix line (for example the 510, 520, 710 and 720)
+ - Dell 7285
diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
index 0951545eab21..c0a250daa927 100644
--- a/drivers/media/pci/intel/ipu3/Kconfig
+++ b/drivers/media/pci/intel/ipu3/Kconfig
@@ -2,13 +2,13 @@
config VIDEO_IPU3_CIO2
tristate "Intel ipu3-cio2 driver"
depends on VIDEO_DEV && PCI
+ depends on IPU_BRIDGE || !IPU_BRIDGE
depends on ACPI || COMPILE_TEST
depends on X86
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
select VIDEOBUF2_DMA_SG
- select IPU_BRIDGE if CIO2_BRIDGE
help
This is the Intel IPU3 CIO2 CSI-2 receiver unit, found in Intel
@@ -18,22 +18,3 @@ config VIDEO_IPU3_CIO2
Say Y or M here if you have a Skylake/Kaby Lake SoC with MIPI CSI-2
connected camera.
The module will be called ipu3-cio2.
-
-config CIO2_BRIDGE
- bool "IPU3 CIO2 Sensors Bridge"
- depends on VIDEO_IPU3_CIO2 && ACPI
- depends on I2C
- help
- This extension provides an API for the ipu3-cio2 driver to create
- connections to cameras that are hidden in the SSDB buffer in ACPI.
- It can be used to enable support for cameras in detachable / hybrid
- devices that ship with Windows.
-
- Say Y here if your device is a detachable / hybrid laptop that comes
- with Windows installed by the OEM, for example:
-
- - Microsoft Surface models (except Surface Pro 3)
- - The Lenovo Miix line (for example the 510, 520, 710 and 720)
- - Dell 7285
-
- If in doubt, say N here.
diff --git a/drivers/media/pci/intel/ivsc/Kconfig b/drivers/media/pci/intel/ivsc/Kconfig
index 1ef1c4e3750d..a8cb981544f7 100644
--- a/drivers/media/pci/intel/ivsc/Kconfig
+++ b/drivers/media/pci/intel/ivsc/Kconfig
@@ -3,7 +3,10 @@
config INTEL_VSC
tristate "Intel Visual Sensing Controller"
- depends on INTEL_MEI && ACPI
+ depends on INTEL_MEI && ACPI && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
help
This adds support for Intel Visual Sensing Controller (IVSC).
diff --git a/drivers/media/platform/intel/pxa_camera.c b/drivers/media/platform/intel/pxa_camera.c
index 6e6caf50e11e..59b89e421dc2 100644
--- a/drivers/media/platform/intel/pxa_camera.c
+++ b/drivers/media/platform/intel/pxa_camera.c
@@ -2398,7 +2398,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
PXA_CAM_DRV_NAME, pcdev);
if (err) {
dev_err(&pdev->dev, "Camera interrupt register failed\n");
- goto exit_v4l2_device_unregister;
+ goto exit_deactivate;
}
pcdev->notifier.ops = &pxa_camera_sensor_ops;
diff --git a/drivers/media/platform/marvell/Kconfig b/drivers/media/platform/marvell/Kconfig
index ec1a16734a28..d6499ffe30e8 100644
--- a/drivers/media/platform/marvell/Kconfig
+++ b/drivers/media/platform/marvell/Kconfig
@@ -7,7 +7,7 @@ config VIDEO_CAFE_CCIC
depends on V4L_PLATFORM_DRIVERS
depends on PCI && I2C && VIDEO_DEV
depends on COMMON_CLK
- select VIDEO_OV7670
+ select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_DMA_SG
@@ -22,7 +22,7 @@ config VIDEO_MMP_CAMERA
depends on I2C && VIDEO_DEV
depends on ARCH_MMP || COMPILE_TEST
depends on COMMON_CLK
- select VIDEO_OV7670
+ select VIDEO_OV7670 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
select I2C_GPIO
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
index d299cc2962a5..ae6290d28f8e 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
@@ -138,7 +138,8 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
vpu->ctx->vpu_inst = vpu;
status = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler, vpu->id,
- vpu_enc_ipi_handler, "venc", NULL);
+ vpu_enc_ipi_handler, "venc",
+ vpu->ctx->dev);
if (status) {
mtk_venc_err(vpu->ctx, "vpu_ipi_register fail %d", status);
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 16f19a640130..5f93712bf485 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -1490,7 +1490,6 @@ err_cleanup:
v4l2_async_unregister_subdev(&csis->sd);
err_disable_clock:
mipi_csis_clk_disable(csis);
- fwnode_handle_put(csis->sd.fwnode);
return ret;
}
@@ -1510,7 +1509,6 @@ static void mipi_csis_remove(struct platform_device *pdev)
mipi_csis_clk_disable(csis);
v4l2_subdev_cleanup(&csis->sd);
media_entity_cleanup(&csis->sd.entity);
- fwnode_handle_put(csis->sd.fwnode);
pm_runtime_set_suspended(&pdev->dev);
}
diff --git a/drivers/media/platform/via/Kconfig b/drivers/media/platform/via/Kconfig
index 8926eb0803b2..6e603c038248 100644
--- a/drivers/media/platform/via/Kconfig
+++ b/drivers/media/platform/via/Kconfig
@@ -7,7 +7,7 @@ config VIDEO_VIA_CAMERA
depends on V4L_PLATFORM_DRIVERS
depends on FB_VIA && VIDEO_DEV
select VIDEOBUF2_DMA_SG
- select VIDEO_OV7670
+ select VIDEO_OV7670 if VIDEO_CAMERA_SENSOR
help
Driver support for the integrated camera controller in VIA
Chrome9 chipsets. Currently only tested on OLPC xo-1.5 systems
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index eaa3bbc903d7..3d3b54be2955 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -499,7 +499,7 @@ struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(fc0011_attach);
+EXPORT_SYMBOL_GPL(fc0011_attach);
MODULE_DESCRIPTION("Fitipower FC0011 silicon tuner driver");
MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index 4429d5e8c579..81e65acbdb17 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -495,7 +495,7 @@ err:
return fe;
}
-EXPORT_SYMBOL(fc0012_attach);
+EXPORT_SYMBOL_GPL(fc0012_attach);
MODULE_DESCRIPTION("Fitipower FC0012 silicon tuner driver");
MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index 29dd9b55ff33..1006a2798eef 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -608,7 +608,7 @@ struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(fc0013_attach);
+EXPORT_SYMBOL_GPL(fc0013_attach);
MODULE_DESCRIPTION("Fitipower FC0013 silicon tuner driver");
MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index 1c746bed51fe..1575ab94e1c8 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -410,7 +410,7 @@ struct dvb_frontend *max2165_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(max2165_attach);
+EXPORT_SYMBOL_GPL(max2165_attach);
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
MODULE_DESCRIPTION("Maxim MAX2165 silicon tuner driver");
diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
index 0c9161516abd..ed8bdf7ebd99 100644
--- a/drivers/media/tuners/mc44s803.c
+++ b/drivers/media/tuners/mc44s803.c
@@ -356,7 +356,7 @@ error:
kfree(priv);
return NULL;
}
-EXPORT_SYMBOL(mc44s803_attach);
+EXPORT_SYMBOL_GPL(mc44s803_attach);
MODULE_AUTHOR("Jochen Friedrich");
MODULE_DESCRIPTION("Freescale MC44S803 silicon tuner driver");
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 0278a9f0aeef..4205ed4cf467 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -440,7 +440,7 @@ struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter
return fe;
}
-EXPORT_SYMBOL(mt2060_attach);
+EXPORT_SYMBOL_GPL(mt2060_attach);
static int mt2060_probe(struct i2c_client *client)
{
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index 37f50ff6c0bd..eebc06088341 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -274,7 +274,7 @@ struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe,
fe->tuner_priv = priv;
return fe;
}
-EXPORT_SYMBOL(mt2131_attach);
+EXPORT_SYMBOL_GPL(mt2131_attach);
MODULE_AUTHOR("Steven Toth");
MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver");
diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
index 6136f20fa9b7..2e92885a6bcb 100644
--- a/drivers/media/tuners/mt2266.c
+++ b/drivers/media/tuners/mt2266.c
@@ -336,7 +336,7 @@ struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter
mt2266_calibrate(priv);
return fe;
}
-EXPORT_SYMBOL(mt2266_attach);
+EXPORT_SYMBOL_GPL(mt2266_attach);
MODULE_AUTHOR("Olivier DANET");
MODULE_DESCRIPTION("Microtune MT2266 silicon tuner driver");
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 06dfab9fb8cb..d9bfa257a005 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -4120,7 +4120,7 @@ struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe,
fe->tuner_priv = state;
return fe;
}
-EXPORT_SYMBOL(mxl5005s_attach);
+EXPORT_SYMBOL_GPL(mxl5005s_attach);
MODULE_DESCRIPTION("MaxLinear MXL5005S silicon tuner driver");
MODULE_AUTHOR("Steven Toth");
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index a7b19863f489..48fc79cd4027 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -441,7 +441,7 @@ struct dvb_frontend * qt1010_attach(struct dvb_frontend *fe,
fe->tuner_priv = priv;
return fe;
}
-EXPORT_SYMBOL(qt1010_attach);
+EXPORT_SYMBOL_GPL(qt1010_attach);
MODULE_DESCRIPTION("Quantek QT1010 silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 4ed94646116f..7d8d84dcb245 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -336,7 +336,7 @@ struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
return fe;
}
-EXPORT_SYMBOL(tda18218_attach);
+EXPORT_SYMBOL_GPL(tda18218_attach);
MODULE_DESCRIPTION("NXP TDA18218HN silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c
index 69c2e1b99bf1..5a967edceca9 100644
--- a/drivers/media/tuners/xc2028.c
+++ b/drivers/media/tuners/xc2028.c
@@ -1512,7 +1512,7 @@ fail:
return NULL;
}
-EXPORT_SYMBOL(xc2028_attach);
+EXPORT_SYMBOL_GPL(xc2028_attach);
MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver");
MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>");
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index d59b4ab77430..57ded9ff3f04 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1742,7 +1742,7 @@ fail2:
xc4000_release(fe);
return NULL;
}
-EXPORT_SYMBOL(xc4000_attach);
+EXPORT_SYMBOL_GPL(xc4000_attach);
MODULE_AUTHOR("Steven Toth, Davide Ferri");
MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver");
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 7b7d9fe4f945..2182e5b7b606 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1460,7 +1460,7 @@ fail:
xc5000_release(fe);
return NULL;
}
-EXPORT_SYMBOL(xc5000_attach);
+EXPORT_SYMBOL_GPL(xc5000_attach);
MODULE_AUTHOR("Steven Toth");
MODULE_DESCRIPTION("Xceive xc5000 silicon tuner driver");
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index b3c472b8c5a9..cb61fd6cc6c6 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -12,8 +12,8 @@ config VIDEO_EM28XX_V4L2
select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
- select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+ select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
+ select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
help
This is a video4linux driver for Empia 28xx based TV cards.
diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
index 4ff79940ad8d..b2a15d9fb1f3 100644
--- a/drivers/media/usb/go7007/Kconfig
+++ b/drivers/media/usb/go7007/Kconfig
@@ -12,8 +12,8 @@ config VIDEO_GO7007
select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && VIDEO_CAMERA_SENSOR
help
This is a video4linux driver for the WIS GO7007 MPEG
encoder chip.
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 5e9d3da862dd..e59a463c2761 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1402,6 +1402,9 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
query_menu->id = id;
query_menu->index = index;
+ if (index >= BITS_PER_TYPE(mapping->menu_mask))
+ return -EINVAL;
+
ret = mutex_lock_interruptible(&chain->ctrl_mutex);
if (ret < 0)
return -ERESTARTSYS;
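
The uvc_ctrl.c hunk rejects a menu index before it can ever be used as a bit position in mapping->menu_mask, where an out-of-range shift would be undefined behaviour. A stand-alone sketch of that guard with assumed types (the u32 mask and the helper name check_menu_index are invented for illustration):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

static int check_menu_index(u32 menu_mask, u32 index)
{
	/* BIT(index) is only defined for index < BITS_PER_TYPE(menu_mask),
	 * so validate the range before testing membership in the mask. */
	if (index >= BITS_PER_TYPE(menu_mask))
		return -EINVAL;

	return (menu_mask & BIT(index)) ? 0 : -EINVAL;
}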
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 85be64579fc9..90ce58fd629e 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -241,6 +241,7 @@ config MFD_CS42L43
tristate
select MFD_CORE
select REGMAP
+ select REGMAP_IRQ
config MFD_CS42L43_I2C
tristate "Cirrus Logic CS42L43 (I2C)"
@@ -1220,7 +1221,7 @@ config MFD_RC5T583
different functionality of the device.
config MFD_RK8XX
- bool
+ tristate
select MFD_CORE
config MFD_RK8XX_I2C
@@ -1371,8 +1372,9 @@ config MFD_SC27XX_PMIC
and it also adds the irq_chip parts for handling the PMIC chip events.
config RZ_MTU3
- bool "Renesas RZ/G2L MTU3a core driver"
+ tristate "Renesas RZ/G2L MTU3a core driver"
depends on (ARCH_RZG2L && OF) || COMPILE_TEST
+ select MFD_CORE
help
Select this option to enable Renesas RZ/G2L MTU3a core driver for
the Multi-Function Timer Pulse Unit 3 (MTU3a) hardware available
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 9d9e9787d5e8..15c95828b09a 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -21,7 +21,6 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/of.h>
-#include <linux/of_device.h>
/*
* Interrupt register offsets
diff --git a/drivers/mfd/acer-ec-a500.c b/drivers/mfd/acer-ec-a500.c
index feb757e90dc3..79405835ff8e 100644
--- a/drivers/mfd/acer-ec-a500.c
+++ b/drivers/mfd/acer-ec-a500.c
@@ -9,7 +9,7 @@
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
diff --git a/drivers/mfd/act8945a.c b/drivers/mfd/act8945a.c
index 2406fcdff5f9..4e32ac3d573e 100644
--- a/drivers/mfd/act8945a.c
+++ b/drivers/mfd/act8945a.c
@@ -10,7 +10,7 @@
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
static const struct mfd_cell act8945a_devs[] = {
@@ -68,7 +68,7 @@ MODULE_DEVICE_TABLE(of, act8945a_of_match);
static struct i2c_driver act8945a_i2c_driver = {
.driver = {
.name = "act8945a",
- .of_match_table = of_match_ptr(act8945a_of_match),
+ .of_match_table = act8945a_of_match,
},
.probe = act8945a_i2c_probe,
.id_table = act8945a_i2c_id,
diff --git a/drivers/mfd/altera-a10sr.c b/drivers/mfd/altera-a10sr.c
index 34ef526f4aee..d53e433ab5c1 100644
--- a/drivers/mfd/altera-a10sr.c
+++ b/drivers/mfd/altera-a10sr.c
@@ -163,7 +163,7 @@ static struct spi_driver altr_a10sr_spi_driver = {
.probe = altr_a10sr_spi_probe,
.driver = {
.name = "altr_a10sr",
- .of_match_table = of_match_ptr(altr_a10sr_spi_of_match),
+ .of_match_table = altr_a10sr_spi_of_match,
},
.id_table = altr_a10sr_spi_ids,
};
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index af205813b281..0e52bd2ebd74 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -14,8 +14,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index c166fcd331f1..19a0adf8ce3d 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -15,7 +15,6 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/mfd/atc260x-core.c b/drivers/mfd/atc260x-core.c
index 7c5de3ae776e..67473b58b03d 100644
--- a/drivers/mfd/atc260x-core.c
+++ b/drivers/mfd/atc260x-core.c
@@ -11,7 +11,6 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#define ATC260X_CHIP_REV_MAX 31
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index 3c2414ba4b01..20de7f49a830 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -83,7 +83,6 @@ static int atmel_hlcdc_probe(struct platform_device *pdev)
struct atmel_hlcdc_regmap *hregmap;
struct device *dev = &pdev->dev;
struct atmel_hlcdc *hlcdc;
- struct resource *res;
hregmap = devm_kzalloc(dev, sizeof(*hregmap), GFP_KERNEL);
if (!hregmap)
@@ -93,8 +92,7 @@ static int atmel_hlcdc_probe(struct platform_device *pdev)
if (!hlcdc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hregmap->regs = devm_ioremap_resource(dev, res);
+ hregmap->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hregmap->regs))
return PTR_ERR(hregmap->regs);
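
Several probe functions in this series (atmel-hlcdc here, and later exynos-lpass, hi655x, omap-usb-host/tll, stm32-timers, ti_am335x_tscadc) replace the platform_get_resource() + devm_ioremap_resource() pair with a single helper. Roughly, for a hypothetical foo_probe():

#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* One call looks up IORESOURCE_MEM index 0 and ioremaps it. */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* When the struct resource is still needed (e.g. for its size),
	 * devm_platform_get_and_ioremap_resource(pdev, 0, &res) does the
	 * same thing and hands the resource back. */
	return 0;
}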
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index c03bc5cda080..87603eeaa277 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -342,7 +342,7 @@ static const struct regmap_config axp152_regmap_config = {
.wr_table = &axp152_writeable_table,
.volatile_table = &axp152_volatile_table,
.max_register = AXP152_PWM1_DUTY_CYCLE,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp192_regmap_config = {
@@ -360,7 +360,7 @@ static const struct regmap_config axp20x_regmap_config = {
.wr_table = &axp20x_writeable_table,
.volatile_table = &axp20x_volatile_table,
.max_register = AXP20X_OCV(AXP20X_OCV_MAX),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp22x_regmap_config = {
@@ -369,7 +369,7 @@ static const struct regmap_config axp22x_regmap_config = {
.wr_table = &axp22x_writeable_table,
.volatile_table = &axp22x_volatile_table,
.max_register = AXP22X_BATLOW_THRES1,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp288_regmap_config = {
@@ -378,7 +378,7 @@ static const struct regmap_config axp288_regmap_config = {
.wr_table = &axp288_writeable_table,
.volatile_table = &axp288_volatile_table,
.max_register = AXP288_FG_TUNE5,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp313a_regmap_config = {
@@ -396,7 +396,7 @@ static const struct regmap_config axp806_regmap_config = {
.wr_table = &axp806_writeable_table,
.volatile_table = &axp806_volatile_table,
.max_register = AXP806_REG_ADDR_EXT,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp15060_regmap_config = {
@@ -405,7 +405,7 @@ static const struct regmap_config axp15060_regmap_config = {
.wr_table = &axp15060_writeable_table,
.volatile_table = &axp15060_volatile_table,
.max_register = AXP15060_IRQ2_STATE,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
#define INIT_REGMAP_IRQ(_variant, _irq, _off, _mask) \
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 9f39b46b87f4..92eede9a5e61 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 92f4dfccc3cc..79d393b602bf 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -10,7 +10,7 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_data/cros_ec_chardev.h>
#include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index 37b23e9bae82..7b6d07cbe6fc 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -1178,8 +1178,8 @@ err:
}
EXPORT_NS_GPL_DEV_PM_OPS(cs42l43_pm_ops, MFD_CS42L43) = {
- SET_SYSTEM_SLEEP_PM_OPS(cs42l43_suspend, cs42l43_resume)
- SET_RUNTIME_PM_OPS(cs42l43_runtime_suspend, cs42l43_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(cs42l43_suspend, cs42l43_resume)
+ RUNTIME_PM_OPS(cs42l43_runtime_suspend, cs42l43_runtime_resume, NULL)
};
MODULE_DESCRIPTION("CS42L43 Core Driver");
diff --git a/drivers/mfd/cs47l15-tables.c b/drivers/mfd/cs47l15-tables.c
index 3c77f0a24e9b..59b005cc1e33 100644
--- a/drivers/mfd/cs47l15-tables.c
+++ b/drivers/mfd/cs47l15-tables.c
@@ -1249,7 +1249,7 @@ const struct regmap_config cs47l15_16bit_spi_regmap = {
.readable_reg = &cs47l15_16bit_readable_register,
.volatile_reg = &cs47l15_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l15_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l15_reg_default),
};
@@ -1264,7 +1264,7 @@ const struct regmap_config cs47l15_16bit_i2c_regmap = {
.readable_reg = &cs47l15_16bit_readable_register,
.volatile_reg = &cs47l15_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l15_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l15_reg_default),
};
@@ -1281,7 +1281,7 @@ const struct regmap_config cs47l15_32bit_spi_regmap = {
.readable_reg = &cs47l15_32bit_readable_register,
.volatile_reg = &cs47l15_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l15_32bit_spi_regmap);
@@ -1295,6 +1295,6 @@ const struct regmap_config cs47l15_32bit_i2c_regmap = {
.readable_reg = &cs47l15_32bit_readable_register,
.volatile_reg = &cs47l15_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l15_32bit_i2c_regmap);
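
From axp20x onwards, every regmap_config touched in this series switches its register cache from REGCACHE_RBTREE to REGCACHE_MAPLE. The change is a single field; a hypothetical config for illustration:

#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0xff,
	/* Maple-tree register cache; a drop-in replacement for
	 * REGCACHE_RBTREE on current kernels. */
	.cache_type = REGCACHE_MAPLE,
};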
diff --git a/drivers/mfd/cs47l24-tables.c b/drivers/mfd/cs47l24-tables.c
index c289d92a5c1d..878dfd298a17 100644
--- a/drivers/mfd/cs47l24-tables.c
+++ b/drivers/mfd/cs47l24-tables.c
@@ -1616,7 +1616,7 @@ const struct regmap_config cs47l24_spi_regmap = {
.readable_reg = cs47l24_readable_register,
.volatile_reg = cs47l24_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l24_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l24_reg_default),
};
diff --git a/drivers/mfd/cs47l35-tables.c b/drivers/mfd/cs47l35-tables.c
index a0bc6c5100d6..274f4b05850a 100644
--- a/drivers/mfd/cs47l35-tables.c
+++ b/drivers/mfd/cs47l35-tables.c
@@ -1498,7 +1498,7 @@ const struct regmap_config cs47l35_16bit_spi_regmap = {
.readable_reg = cs47l35_16bit_readable_register,
.volatile_reg = cs47l35_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l35_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l35_reg_default),
};
@@ -1515,7 +1515,7 @@ const struct regmap_config cs47l35_16bit_i2c_regmap = {
.readable_reg = cs47l35_16bit_readable_register,
.volatile_reg = cs47l35_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l35_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l35_reg_default),
};
@@ -1534,7 +1534,7 @@ const struct regmap_config cs47l35_32bit_spi_regmap = {
.readable_reg = cs47l35_32bit_readable_register,
.volatile_reg = cs47l35_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l35_32bit_spi_regmap);
@@ -1550,6 +1550,6 @@ const struct regmap_config cs47l35_32bit_i2c_regmap = {
.readable_reg = cs47l35_32bit_readable_register,
.volatile_reg = cs47l35_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l35_32bit_i2c_regmap);
diff --git a/drivers/mfd/cs47l85-tables.c b/drivers/mfd/cs47l85-tables.c
index 270d8eda3f5f..f397894827ce 100644
--- a/drivers/mfd/cs47l85-tables.c
+++ b/drivers/mfd/cs47l85-tables.c
@@ -2836,7 +2836,7 @@ const struct regmap_config cs47l85_16bit_spi_regmap = {
.readable_reg = cs47l85_16bit_readable_register,
.volatile_reg = cs47l85_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l85_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l85_reg_default),
};
@@ -2853,7 +2853,7 @@ const struct regmap_config cs47l85_16bit_i2c_regmap = {
.readable_reg = cs47l85_16bit_readable_register,
.volatile_reg = cs47l85_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l85_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l85_reg_default),
};
@@ -2872,7 +2872,7 @@ const struct regmap_config cs47l85_32bit_spi_regmap = {
.readable_reg = cs47l85_32bit_readable_register,
.volatile_reg = cs47l85_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l85_32bit_spi_regmap);
@@ -2888,6 +2888,6 @@ const struct regmap_config cs47l85_32bit_i2c_regmap = {
.readable_reg = cs47l85_32bit_readable_register,
.volatile_reg = cs47l85_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l85_32bit_i2c_regmap);
diff --git a/drivers/mfd/cs47l90-tables.c b/drivers/mfd/cs47l90-tables.c
index 7345fc09c0bb..6f9ceb36c533 100644
--- a/drivers/mfd/cs47l90-tables.c
+++ b/drivers/mfd/cs47l90-tables.c
@@ -2539,7 +2539,7 @@ const struct regmap_config cs47l90_16bit_spi_regmap = {
.readable_reg = cs47l90_16bit_readable_register,
.volatile_reg = cs47l90_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l90_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l90_reg_default),
};
@@ -2556,7 +2556,7 @@ const struct regmap_config cs47l90_16bit_i2c_regmap = {
.readable_reg = cs47l90_16bit_readable_register,
.volatile_reg = cs47l90_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l90_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l90_reg_default),
};
@@ -2575,7 +2575,7 @@ const struct regmap_config cs47l90_32bit_spi_regmap = {
.readable_reg = cs47l90_32bit_readable_register,
.volatile_reg = cs47l90_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l90_32bit_spi_regmap);
@@ -2591,6 +2591,6 @@ const struct regmap_config cs47l90_32bit_i2c_regmap = {
.readable_reg = cs47l90_32bit_readable_register,
.volatile_reg = cs47l90_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l90_32bit_i2c_regmap);
diff --git a/drivers/mfd/cs47l92-tables.c b/drivers/mfd/cs47l92-tables.c
index f296e355df4d..4d9ba865aaf6 100644
--- a/drivers/mfd/cs47l92-tables.c
+++ b/drivers/mfd/cs47l92-tables.c
@@ -1890,7 +1890,7 @@ const struct regmap_config cs47l92_16bit_spi_regmap = {
.readable_reg = &cs47l92_16bit_readable_register,
.volatile_reg = &cs47l92_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l92_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l92_reg_default),
};
@@ -1907,7 +1907,7 @@ const struct regmap_config cs47l92_16bit_i2c_regmap = {
.readable_reg = &cs47l92_16bit_readable_register,
.volatile_reg = &cs47l92_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l92_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l92_reg_default),
};
@@ -1926,7 +1926,7 @@ const struct regmap_config cs47l92_32bit_spi_regmap = {
.readable_reg = &cs47l92_32bit_readable_register,
.volatile_reg = &cs47l92_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l92_32bit_spi_regmap);
@@ -1942,6 +1942,6 @@ const struct regmap_config cs47l92_32bit_i2c_regmap = {
.readable_reg = &cs47l92_32bit_readable_register,
.volatile_reg = &cs47l92_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l92_32bit_i2c_regmap);
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 541e2d47677e..fd000a21bcba 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -13,14 +13,11 @@
#include <linux/mfd/core.h>
#include <linux/i2c.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
-#ifdef CONFIG_OF
-#include <linux/of.h>
-#include <linux/of_device.h>
-#endif
/* I2C safe register check */
static inline bool i2c_safe_reg(unsigned char reg)
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index bbaf4f07f274..9a5f51b60bad 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -11,7 +11,6 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/da9055/core.h>
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 48f58b6f5629..45da007d3e70 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index 166cd21088cd..1506d8d352b1 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -109,14 +109,12 @@ static int exynos_lpass_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct exynos_lpass *lpass;
void __iomem *base_top;
- struct resource *res;
lpass = devm_kzalloc(dev, sizeof(*lpass), GFP_KERNEL);
if (!lpass)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_top = devm_ioremap_resource(dev, res);
+ base_top = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_top))
return PTR_ERR(base_top);
diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c
index cb5cf4a81c06..a6a890537a1e 100644
--- a/drivers/mfd/hi6421-pmic-core.c
+++ b/drivers/mfd/hi6421-pmic-core.c
@@ -59,7 +59,7 @@ static int hi6421_pmic_probe(struct platform_device *pdev)
id = of_match_device(of_hi6421_pmic_match, &pdev->dev);
if (!id)
return -EINVAL;
- type = (enum hi6421_type)id->data;
+ type = (uintptr_t)id->data;
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index a58e42ddcd0c..8feae8d8fd9d 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -16,7 +16,7 @@
#include <linux/mfd/hi655x-pmic.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -100,8 +100,7 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
return -ENOMEM;
pmic->dev = dev;
- pmic->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, pmic->res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -163,7 +162,7 @@ MODULE_DEVICE_TABLE(of, hi655x_pmic_match);
static struct platform_driver hi655x_pmic_driver = {
.driver = {
.name = "hi655x-pmic",
- .of_match_table = of_match_ptr(hi655x_pmic_match),
+ .of_match_table = hi655x_pmic_match,
},
.probe = hi655x_pmic_probe,
.remove = hi655x_pmic_remove,
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index 6d3968458e81..c964ea6539aa 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -78,8 +78,6 @@ EXPORT_SYMBOL(ipaq_micro_tx_msg);
static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data)
{
- int i;
-
dev_dbg(micro->dev, "RX msg: %02x, %d bytes\n", id, len);
spin_lock(&micro->lock);
@@ -131,10 +129,7 @@ static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data)
break;
default:
dev_err(micro->dev,
- "unknown msg %d [%d] ", id, len);
- for (i = 0; i < len; ++i)
- pr_cont("0x%02x ", data[i]);
- pr_cont("\n");
+ "unknown msg %d [%d] %*ph\n", id, len, len, data);
}
spin_unlock(&micro->lock);
}
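
The ipaq-micro hunk replaces an open-coded hex dump loop with the %*ph printk extension, which takes an int length plus a buffer pointer and prints up to 64 bytes as hex. Sketch (dump_unknown_msg is an invented wrapper):

#include <linux/device.h>
#include <linux/types.h>

static void dump_unknown_msg(struct device *dev, u8 id, int len, const u8 *data)
{
	/* Prints e.g. "unknown msg 5 [4] de ad be ef" on a single line. */
	dev_err(dev, "unknown msg %d [%d] %*ph\n", id, len, len, data);
}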
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
index dfe9cb79e6a1..e03b4d38fbb0 100644
--- a/drivers/mfd/iqs62x.c
+++ b/drivers/mfd/iqs62x.c
@@ -27,7 +27,7 @@
#include <linux/mfd/iqs62x.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/lochnagar-i2c.c b/drivers/mfd/lochnagar-i2c.c
index 3c8843117080..59092f839d65 100644
--- a/drivers/mfd/lochnagar-i2c.c
+++ b/drivers/mfd/lochnagar-i2c.c
@@ -379,7 +379,7 @@ static int lochnagar_i2c_probe(struct i2c_client *i2c)
static struct i2c_driver lochnagar_i2c_driver = {
.driver = {
.name = "lochnagar",
- .of_match_table = of_match_ptr(lochnagar_of_match),
+ .of_match_table = lochnagar_of_match,
.suppress_bind_attrs = true,
},
.probe = lochnagar_i2c_probe,
diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c
index 6639f0fad4ea..de7ab7aed3c6 100644
--- a/drivers/mfd/lp873x.c
+++ b/drivers/mfd/lp873x.c
@@ -7,8 +7,8 @@
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/mfd/lp873x.h>
diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c
index 88ce4d7c50a7..1b7f8349911d 100644
--- a/drivers/mfd/lp87565.c
+++ b/drivers/mfd/lp87565.c
@@ -92,7 +92,7 @@ static int lp87565_probe(struct i2c_client *client)
of_id = of_match_device(of_lp87565_match_table, &client->dev);
if (of_id)
- lp87565->dev_type = (enum lp87565_device_type)of_id->data;
+ lp87565->dev_type = (uintptr_t)of_id->data;
i2c_set_clientdata(client, lp87565);
diff --git a/drivers/mfd/madera-i2c.c b/drivers/mfd/madera-i2c.c
index 0968aa9733ac..a404ea26bc79 100644
--- a/drivers/mfd/madera-i2c.c
+++ b/drivers/mfd/madera-i2c.c
@@ -10,7 +10,6 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/mfd/madera/core.h>
diff --git a/drivers/mfd/madera-spi.c b/drivers/mfd/madera-spi.c
index da84eb50e53a..ad07ebe29e59 100644
--- a/drivers/mfd/madera-spi.c
+++ b/drivers/mfd/madera-spi.c
@@ -9,7 +9,6 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 25ed8846b7fb..1f4f5002595c 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -402,8 +402,7 @@ static int max14577_i2c_probe(struct i2c_client *i2c)
of_id = of_match_device(max14577_dt_match, &i2c->dev);
if (of_id)
- max14577->dev_type =
- (enum maxim_device_type)of_id->data;
+ max14577->dev_type = (uintptr_t)of_id->data;
} else {
max14577->dev_type = id->driver_data;
}
diff --git a/drivers/mfd/max77541.c b/drivers/mfd/max77541.c
index e147e949c2b3..10c2e274b4af 100644
--- a/drivers/mfd/max77541.c
+++ b/drivers/mfd/max77541.c
@@ -173,7 +173,7 @@ static int max77541_probe(struct i2c_client *client)
i2c_set_clientdata(client, max77541);
max77541->i2c = client;
- max77541->id = (enum max7754x_ids)device_get_match_data(dev);
+ max77541->id = (uintptr_t)device_get_match_data(dev);
if (!max77541->id)
max77541->id = (enum max7754x_ids)id->driver_data;
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 5811ed8f4840..e63e8e47d908 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -30,7 +30,6 @@
#include <linux/mfd/max77620.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 01833086ca7d..91c286c4571c 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -20,7 +20,6 @@
#include <linux/mfd/max77686-private.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
static const struct mfd_cell max77686_devs[] = {
{ .name = "max77686-pmic", },
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index b3689c13a14d..fcff0c498c0f 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -13,7 +13,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
static const struct mfd_cell max77843_devs[] = {
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 78b5ee688dec..8bbe7979db91 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -15,7 +15,6 @@
#include <linux/mfd/max8907.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 0246bbe80354..105d79b91493 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -17,7 +17,6 @@
#include <linux/mfd/core.h>
#include <linux/mfd/max8925.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
static const struct resource bk_resources[] = {
{ 0x84, 0x84, "mode control", IORESOURCE_REG, },
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 94c09a5eab32..110bef71f208 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 33a3ec5464fb..4cc426a6c767 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -12,7 +12,6 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index de59b498c925..6bc0e755ba34 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -53,7 +53,6 @@ static const struct regmap_config mc13xxx_regmap_i2c_config = {
static int mc13xxx_i2c_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct mc13xxx *mc13xxx;
int ret;
@@ -73,13 +72,7 @@ static int mc13xxx_i2c_probe(struct i2c_client *client)
return ret;
}
- if (client->dev.of_node) {
- const struct of_device_id *of_id =
- of_match_device(mc13xxx_dt_ids, &client->dev);
- mc13xxx->variant = of_id->data;
- } else {
- mc13xxx->variant = (void *)id->driver_data;
- }
+ mc13xxx->variant = i2c_get_match_data(client);
return mc13xxx_common_init(&client->dev);
}
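
The mc13xxx-i2c hunk collapses the OF-versus-i2c_device_id branching into i2c_get_match_data(), which returns the match data for whichever table the client actually matched against (OF, ACPI or legacy I2C ID). Roughly, assuming a hypothetical foo_variant:

#include <linux/i2c.h>

struct foo_variant {
	int model;
};

static int foo_i2c_probe(struct i2c_client *client)
{
	const struct foo_variant *variant;

	/* Covers DT, ACPI and i2c_device_id matching alike; NULL if the
	 * matched entry carried no data. */
	variant = i2c_get_match_data(client);
	if (!variant)
		return -ENODEV;

	/* ... use variant ... */
	return 0;
}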
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index 389756436af6..49830b526ee8 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -3,6 +3,8 @@
// Copyright (c) 2020 MediaTek Inc.
#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/mt6357/core.h>
#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/core.h>
@@ -11,9 +13,6 @@
#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index f6c1f80f94a4..4449dde05021 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -6,9 +6,10 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index 72f923e47752..886745b5b607 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -3,10 +3,9 @@
// Copyright (c) 2019 MediaTek Inc.
#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/suspend.h>
diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c
index 111d11fd25aa..21f3033d6eb5 100644
--- a/drivers/mfd/mxs-lradc.c
+++ b/drivers/mfd/mxs-lradc.c
@@ -142,7 +142,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
if (!of_id)
return -EINVAL;
- lradc->soc = (enum mxs_lradc_id)of_id->data;
+ lradc->soc = (uintptr_t)of_id->data;
lradc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(lradc->clk)) {
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 7f5775109593..78f1bb55dbc0 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -534,7 +534,6 @@ static int usbhs_omap_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
struct usbhs_hcd_omap *omap;
- struct resource *res;
int ret = 0;
int i;
bool need_logic_fck;
@@ -569,8 +568,7 @@ static int usbhs_omap_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- omap->uhh_base = devm_ioremap_resource(dev, res);
+ omap->uhh_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(omap->uhh_base))
return PTR_ERR(omap->uhh_base);
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 69cbc2097911..906353735c78 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -200,15 +200,13 @@ static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
static int usbtll_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct usbtll_omap *tll;
void __iomem *base;
int i, nch, ver;
dev_dbg(dev, "starting TI HSUSB TLL Controller\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index a36f12402987..6e562bab62e4 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -18,7 +18,8 @@
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/palmas.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
static const struct regmap_config palmas_regmap_config[PALMAS_NUM_CLIENTS] = {
{
diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
index 94a8cca1d955..3ac3742f438b 100644
--- a/drivers/mfd/qcom-pm8008.c
+++ b/drivers/mfd/qcom-pm8008.c
@@ -9,7 +9,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
index 9a948df8c28d..07c531bd1236 100644
--- a/drivers/mfd/qcom-pm8xxx.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -103,8 +103,9 @@ static int
pm8xxx_config_irq(struct pm_irq_chip *chip, unsigned int bp, unsigned int cp)
{
int rc;
+ unsigned long flags;
- spin_lock(&chip->pm_irq_lock);
+ spin_lock_irqsave(&chip->pm_irq_lock, flags);
rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
if (rc) {
pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
@@ -116,7 +117,7 @@ pm8xxx_config_irq(struct pm_irq_chip *chip, unsigned int bp, unsigned int cp)
if (rc)
pr_err("Failed Configuring IRQ rc=%d\n", rc);
bail:
- spin_unlock(&chip->pm_irq_lock);
+ spin_unlock_irqrestore(&chip->pm_irq_lock, flags);
return rc;
}
@@ -321,6 +322,7 @@ static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
unsigned int pmirq = irqd_to_hwirq(d);
unsigned int bits;
+ unsigned long flags;
int irq_bit;
u8 block;
int rc;
@@ -331,7 +333,7 @@ static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
block = pmirq / 8;
irq_bit = pmirq % 8;
- spin_lock(&chip->pm_irq_lock);
+ spin_lock_irqsave(&chip->pm_irq_lock, flags);
rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
if (rc) {
pr_err("Failed Selecting Block %d rc=%d\n", block, rc);
@@ -346,7 +348,7 @@ static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
*state = !!(bits & BIT(irq_bit));
bail:
- spin_unlock(&chip->pm_irq_lock);
+ spin_unlock_irqrestore(&chip->pm_irq_lock, flags);
return rc;
}
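
The qcom-pm8xxx hunks convert the chip lock from plain spin_lock()/spin_unlock() to the irqsave variants, so the critical section is safe regardless of whether the caller already runs with interrupts disabled. Generic shape of the conversion, with an assumed foo_chip holding the regmap and lock:

#include <linux/regmap.h>
#include <linux/spinlock.h>

struct foo_chip {
	struct regmap *regmap;
	spinlock_t lock;
};

static int foo_locked_write(struct foo_chip *chip, unsigned int reg,
			    unsigned int val)
{
	unsigned long flags;
	int rc;

	/* irqsave/irqrestore saves and restores the caller's interrupt
	 * state instead of assuming interrupts are enabled. */
	spin_lock_irqsave(&chip->lock, flags);
	rc = regmap_write(chip->regmap, reg, val);
	spin_unlock_irqrestore(&chip->lock, flags);

	return rc;
}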
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 545196c85b5c..da50eba10014 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -18,7 +18,7 @@
#include <linux/mfd/rave-sp.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <asm/unaligned.h>
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
index e8fc9e2ab1d0..11a831e92da8 100644
--- a/drivers/mfd/rk8xx-core.c
+++ b/drivers/mfd/rk8xx-core.c
@@ -14,7 +14,7 @@
#include <linux/mfd/rk808.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 333fef8729a5..0fe616b2db8e 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -277,7 +277,7 @@ static SIMPLE_DEV_PM_OPS(rn5t618_i2c_dev_pm_ops,
static struct i2c_driver rn5t618_i2c_driver = {
.driver = {
.name = "rn5t618",
- .of_match_table = of_match_ptr(rn5t618_of_match),
+ .of_match_table = rn5t618_of_match,
.pm = &rn5t618_i2c_dev_pm_ops,
},
.probe = rn5t618_i2c_probe,
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index 93d80a79b901..594718f7e8e1 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -15,7 +15,7 @@
#include <linux/mfd/rohm-bd71828.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 0b58ecc78334..4798bdf27afb 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -14,7 +14,7 @@
#include <linux/mfd/rohm-bd718x7.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>
diff --git a/drivers/mfd/rohm-bd9576.c b/drivers/mfd/rohm-bd9576.c
index 645673322ec0..bceac7016740 100644
--- a/drivers/mfd/rohm-bd9576.c
+++ b/drivers/mfd/rohm-bd9576.c
@@ -13,7 +13,7 @@
#include <linux/mfd/rohm-bd957x.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
index 26972a5aff45..06d78a1cf1cc 100644
--- a/drivers/mfd/rsmu_i2c.c
+++ b/drivers/mfd/rsmu_i2c.c
@@ -277,7 +277,7 @@ MODULE_DEVICE_TABLE(of, rsmu_i2c_of_match);
static struct i2c_driver rsmu_i2c_driver = {
.driver = {
.name = "rsmu-i2c",
- .of_match_table = of_match_ptr(rsmu_i2c_of_match),
+ .of_match_table = rsmu_i2c_of_match,
},
.probe = rsmu_i2c_probe,
.remove = rsmu_i2c_remove,
diff --git a/drivers/mfd/rsmu_spi.c b/drivers/mfd/rsmu_spi.c
index a4a595bb8d0d..ca0a1202c3ce 100644
--- a/drivers/mfd/rsmu_spi.c
+++ b/drivers/mfd/rsmu_spi.c
@@ -262,7 +262,7 @@ MODULE_DEVICE_TABLE(of, rsmu_spi_of_match);
static struct spi_driver rsmu_spi_driver = {
.driver = {
.name = "rsmu-spi",
- .of_match_table = of_match_ptr(rsmu_spi_of_match),
+ .of_match_table = rsmu_spi_of_match,
},
.probe = rsmu_spi_probe,
.remove = rsmu_spi_remove,
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index 67b0a228db24..7e23ab3d5842 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -10,9 +10,9 @@
*/
#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rt5033.h>
#include <linux/mfd/rt5033-private.h>
diff --git a/drivers/mfd/rz-mtu3.c b/drivers/mfd/rz-mtu3.c
index 04006f4aa702..f3dac4a29a83 100644
--- a/drivers/mfd/rz-mtu3.c
+++ b/drivers/mfd/rz-mtu3.c
@@ -11,7 +11,9 @@
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rz-mtu3.h>
-#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
@@ -20,7 +22,7 @@
struct rz_mtu3_priv {
void __iomem *mmio;
struct reset_control *rstc;
- raw_spinlock_t lock;
+ spinlock_t lock;
};
/******* MTU3 registers (original offset is +0x1200) *******/
@@ -174,11 +176,11 @@ void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 offset,
struct rz_mtu3_priv *priv = mtu->priv_data;
unsigned long tmdr, flags;
- raw_spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
tmdr = rz_mtu3_shared_reg_read(ch, offset);
__assign_bit(pos, &tmdr, !!val);
rz_mtu3_shared_reg_write(ch, offset, tmdr);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL_GPL(rz_mtu3_shared_reg_update_bit);
@@ -250,16 +252,17 @@ static void rz_mtu3_start_stop_ch(struct rz_mtu3_channel *ch, bool start)
u16 offset;
u8 bitpos;
- /* start stop register shared by multiple timer channels */
- raw_spin_lock_irqsave(&priv->lock, flags);
-
offset = rz_mtu3_get_tstr_offset(ch);
bitpos = rz_mtu3_get_tstr_bit_pos(ch);
+
+ /* start stop register shared by multiple timer channels */
+ spin_lock_irqsave(&priv->lock, flags);
+
tstr = rz_mtu3_shared_reg_read(ch, offset);
__assign_bit(bitpos, &tstr, start);
rz_mtu3_shared_reg_write(ch, offset, tstr);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
@@ -267,21 +270,18 @@ bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
struct rz_mtu3 *mtu = dev_get_drvdata(ch->dev->parent);
struct rz_mtu3_priv *priv = mtu->priv_data;
unsigned long flags, tstr;
- bool ret = false;
u16 offset;
u8 bitpos;
- /* start stop register shared by multiple timer channels */
- raw_spin_lock_irqsave(&priv->lock, flags);
-
offset = rz_mtu3_get_tstr_offset(ch);
bitpos = rz_mtu3_get_tstr_bit_pos(ch);
- tstr = rz_mtu3_shared_reg_read(ch, offset);
- ret = tstr & BIT(bitpos);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
+ /* start stop register shared by multiple timer channels */
+ spin_lock_irqsave(&priv->lock, flags);
+ tstr = rz_mtu3_shared_reg_read(ch, offset);
+ spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
+ return tstr & BIT(bitpos);
}
EXPORT_SYMBOL_GPL(rz_mtu3_is_enabled);
@@ -349,7 +349,7 @@ static int rz_mtu3_probe(struct platform_device *pdev)
return PTR_ERR(ddata->clk);
reset_control_deassert(priv->rstc);
- raw_spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, ddata);
for (i = 0; i < RZ_MTU_NUM_CHANNELS; i++) {
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index d2f631901886..a6b0d7300b2d 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -10,8 +10,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index d21f32cc784d..81e517cdfb27 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/mfd/sc27xx-pmic.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index dee89db3471d..b0b0be483dbf 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -14,12 +14,12 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/ssbi.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
/* SSBI 2.0 controller registers */
#define SSBI2_CMD 0x0008
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index fa322f4412c8..b2704a9809c7 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -9,6 +9,7 @@
#include <linux/mfd/stm32-lptimer.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#define STM32_LPTIM_MAX_REGISTER 0x3fc
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index 44ed2fce0319..732a28db80fa 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -8,6 +8,7 @@
#include <linux/mfd/stm32-timers.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#define STM32_TIMERS_MAX_REGISTERS 0x3fc
@@ -226,8 +227,7 @@ static int stm32_timers_probe(struct platform_device *pdev)
if (!ddata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio = devm_ioremap_resource(dev, res);
+ mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mmio))
return PTR_ERR(mmio);
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 1d7b401776d1..fe018bedab98 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -87,7 +87,7 @@ stmpe_i2c_probe(struct i2c_client *i2c)
dev_info(&i2c->dev, "matching on node name, compatible is preferred\n");
partnum = id->driver_data;
} else
- partnum = (enum stmpe_partnum)of_id->data;
+ partnum = (uintptr_t)of_id->data;
return stmpe_probe(&i2c_ci, partnum);
}
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index 3cc7492f828f..c5128fe96cc7 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -219,7 +219,7 @@ MODULE_DEVICE_TABLE(of, stpmic1_of_match);
static struct i2c_driver stpmic1_driver = {
.driver = {
.name = "stpmic1",
- .of_match_table = of_match_ptr(stpmic1_of_match),
+ .of_match_table = stpmic1_of_match,
.pm = pm_sleep_ptr(&stpmic1_pm),
},
.probe = stpmic1_probe,
diff --git a/drivers/mfd/sun4i-gpadc.c b/drivers/mfd/sun4i-gpadc.c
index d1cbea27b136..3029d48e982c 100644
--- a/drivers/mfd/sun4i-gpadc.c
+++ b/drivers/mfd/sun4i-gpadc.c
@@ -8,8 +8,8 @@
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/sun4i-gpadc.h>
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 16df64e3c0be..db28eb0c8995 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -340,7 +340,7 @@ tc3589x_of_probe(struct device *dev, enum tc3589x_version *version)
of_id = of_match_device(tc3589x_match, dev);
if (!of_id)
return ERR_PTR(-ENODEV);
- *version = (enum tc3589x_version) of_id->data;
+ *version = (uintptr_t) of_id->data;
for_each_child_of_node(np, child) {
if (of_device_is_compatible(child, "toshiba,tc3589x-gpio"))
@@ -483,7 +483,7 @@ static struct i2c_driver tc3589x_driver = {
.driver = {
.name = "tc3589x",
.pm = pm_sleep_ptr(&tc3589x_dev_pm_ops),
- .of_match_table = of_match_ptr(tc3589x_match),
+ .of_match_table = tc3589x_match,
},
.probe = tc3589x_probe,
.remove = tc3589x_remove,
diff --git a/drivers/mfd/ti-lmu.c b/drivers/mfd/ti-lmu.c
index 4f06adad7b5e..cfc9f88b9842 100644
--- a/drivers/mfd/ti-lmu.c
+++ b/drivers/mfd/ti-lmu.c
@@ -17,7 +17,6 @@
#include <linux/mfd/ti-lmu-register.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
struct ti_lmu_data {
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 07825cfd8aa8..b88eb70c17b3 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -14,7 +14,7 @@
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/mfd/ti_am335x_tscadc.h>
@@ -201,8 +201,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
else
tscadc->irq = err;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res);
+ tscadc->tscadc_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(tscadc->tscadc_base))
return PTR_ERR(tscadc->tscadc_base);
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 9716bf703c7a..95dafb0e9f00 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6507x.h>
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index a35ad70755fb..9245e11219f3 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -17,7 +17,6 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps65090.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/err.h>
#define NUM_INT_REG 2
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 60599291b315..029ecc32f078 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 619bf7adb20c..11e4e52b56be 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -15,7 +15,6 @@
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
diff --git a/drivers/mfd/tps6594-core.c b/drivers/mfd/tps6594-core.c
index 15f314833207..0fb9c5cf213a 100644
--- a/drivers/mfd/tps6594-core.c
+++ b/drivers/mfd/tps6594-core.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6594.h>
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index d85675a4d9a8..9ce34dfd99b3 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -16,8 +16,6 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 6bba39657991..f77ecc635b6f 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -1938,7 +1938,7 @@ const struct regmap_config wm5102_i2c_regmap = {
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm5102_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5102_reg_default),
};
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 65b9b1d6daec..eba324875afd 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -3218,7 +3218,7 @@ const struct regmap_config wm5110_i2c_regmap = {
.readable_reg = wm5110_readable_register,
.volatile_reg = wm5110_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm5110_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5110_reg_default),
};
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index e86b6a4896a6..e7e68929275e 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -15,8 +15,7 @@
#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 997837f13180..694ddbbf0372 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -36,7 +36,7 @@ static int wm831x_i2c_probe(struct i2c_client *i2c)
dev_err(&i2c->dev, "Failed to match device\n");
return -ENODEV;
}
- type = (enum wm831x_parent)of_id->data;
+ type = (uintptr_t)of_id->data;
} else {
type = (enum wm831x_parent)id->driver_data;
}
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 7bcddccbf155..76be7ef5c970 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -33,7 +33,7 @@ static int wm831x_spi_probe(struct spi_device *spi)
dev_err(&spi->dev, "Failed to match device\n");
return -ENODEV;
}
- type = (enum wm831x_parent)of_id->data;
+ type = (uintptr_t)of_id->data;
} else {
type = (enum wm831x_parent)id->driver_data;
}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 1e4f1694f065..aba7af688175 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -628,7 +628,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c)
if (i2c->dev.of_node) {
of_id = of_match_device(wm8994_of_match, &i2c->dev);
if (of_id)
- wm8994->type = (enum wm8994_type)of_id->data;
+ wm8994->type = (uintptr_t)of_id->data;
} else {
wm8994->type = id->driver_data;
}
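
A recurring change in the MFD hunks (hi6421, lp87565, max14577, max77541, mxs-lradc, stmpe, tc3589x, wm831x, wm8994) is to cast OF/device match data through uintptr_t instead of straight to an enum, since the .data field is a void pointer carrying a small integer. Sketch of the idiom using invented foo names and the generic device_get_match_data() helper, as in the max77541 hunk:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

enum foo_type { FOO_A = 1, FOO_B };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-a", .data = (void *)FOO_A },
	{ .compatible = "vendor,foo-b", .data = (void *)FOO_B },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* The integer travels through a void *, so convert back via
	 * uintptr_t rather than casting the pointer directly to the enum. */
	enum foo_type type = (uintptr_t)device_get_match_data(&pdev->dev);

	if (!type)
		return -ENODEV;
	/* ... */
	return 0;
}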
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index cd4fef7df336..ee2ed6773afd 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -1238,7 +1238,7 @@ struct regmap_config wm1811_regmap_config = {
.reg_bits = 16,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm1811_defaults,
.num_reg_defaults = ARRAY_SIZE(wm1811_defaults),
@@ -1253,7 +1253,7 @@ struct regmap_config wm8994_regmap_config = {
.reg_bits = 16,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm8994_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8994_defaults),
@@ -1268,7 +1268,7 @@ struct regmap_config wm8958_regmap_config = {
.reg_bits = 16,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm8958_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8958_defaults),
diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c
index 3476787c485e..288c57b2d21e 100644
--- a/drivers/mfd/wm8997-tables.c
+++ b/drivers/mfd/wm8997-tables.c
@@ -1523,7 +1523,7 @@ const struct regmap_config wm8997_i2c_regmap = {
.readable_reg = wm8997_readable_register,
.volatile_reg = wm8997_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm8997_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm8997_reg_default),
};
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
index 9b34a6d76094..b3e6e85bee89 100644
--- a/drivers/mfd/wm8998-tables.c
+++ b/drivers/mfd/wm8998-tables.c
@@ -1556,7 +1556,7 @@ const struct regmap_config wm8998_i2c_regmap = {
.readable_reg = wm8998_readable_register,
.volatile_reg = wm8998_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm8998_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm8998_reg_default),
};
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 3dae5e3a1697..cd512284bfb3 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -83,63 +83,20 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
static void rts5227_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
- u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
if (CHK_PCI_PID(pcr, 0x522A)) {
- if (0 == (lval & 0x0F))
- rtsx_pci_enable_oobs_polling(pcr);
- else
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
rtsx_pci_disable_oobs_polling(pcr);
+ else
+ rtsx_pci_enable_oobs_polling(pcr);
}
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
-
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
-
}
static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
@@ -195,7 +152,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
}
}
- if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ if (option->force_clkreq_0)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index f4ab09439da7..0c7f10bcf6f1 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -386,59 +386,25 @@ static void rts5228_process_ocp(struct rtsx_pcr *pcr)
static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
- u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
- if (0 == (lval & 0x0F))
- rtsx_pci_enable_oobs_polling(pcr);
- else
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
rtsx_pci_disable_oobs_polling(pcr);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
else
- rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+ rtsx_pci_enable_oobs_polling(pcr);
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
- if (option->ltr_en) {
- u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_en) {
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
@@ -469,6 +435,17 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 47ab72a43256..6c81040e18be 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -86,64 +86,22 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
struct rtsx_cr_option *option = &(pcr->option);
- u32 lval;
-
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
- if (0 == (lval & 0x0F))
- rtsx_pci_enable_oobs_polling(pcr);
- else
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
rtsx_pci_disable_oobs_polling(pcr);
+ else
+ rtsx_pci_enable_oobs_polling(pcr);
}
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
-
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
-static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
-{
- struct rtsx_cr_option *option = &(pcr->option);
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
-
- return 0;
-}
-
static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
@@ -276,7 +234,6 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
struct rtsx_cr_option *option = &(pcr->option);
rts5249_init_from_cfg(pcr);
- rts5249_init_from_hw(pcr);
rtsx_pci_init_cmd(pcr);
@@ -327,11 +284,12 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
}
}
+
/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
* to drive low, and we forcibly request clock.
*/
- if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ if (option->force_clkreq_0)
rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 79b18f6f73a8..d2d3a6ccb8f7 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -480,47 +480,19 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
struct rtsx_cr_option *option = &pcr->option;
- u32 lval;
-
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
rts5260_pwr_saving_setting(pcr);
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
/* Set mcu_cnt to 7 to ensure data can be sampled properly */
rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
@@ -539,6 +511,17 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
rts5260_init_hw(pcr);
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
return 0;
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 94af6bf8a25a..67252512a132 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -454,54 +454,17 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
- u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
-
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
u32 val;
rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
@@ -547,6 +510,17 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index a3f4b52bb159..a30751ad3733 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1326,11 +1326,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}
- if (pcr->aspm_mode == ASPM_MODE_REG) {
+ if (pcr->aspm_mode == ASPM_MODE_REG)
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
- }
/* No CD interrupt if probing driver with card inserted.
* So we need to initialize pcr->card_exist here.
@@ -1345,7 +1342,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
- int err;
+ struct rtsx_cr_option *option = &(pcr->option);
+ int err, l1ss;
+ u32 lval;
u16 cfg_val;
u8 val;
@@ -1430,6 +1429,48 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
pcr->aspm_enabled = true;
}
+ l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
+ if (l1ss) {
+ pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
+ if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ } else {
+ option->ltr_enabled = false;
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+ } else {
+ option->ltr_enabled = false;
+ option->force_clkreq_0 = true;
+ }
+
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 153fb8d0008e..df589d9b4d70 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -32,7 +32,6 @@
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
@@ -650,7 +649,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
/*
* Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
- * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
+ * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
* http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
* http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
* http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index a7714e3de887..22e73dd6118b 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1599,7 +1599,7 @@ static void doc_unregister_sysfs(struct platform_device *pdev,
*/
static int flashcontrol_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
u8 fctrl;
@@ -1621,7 +1621,7 @@ DEFINE_SHOW_ATTRIBUTE(flashcontrol);
static int asic_mode_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
int pctrl, mode;
@@ -1658,7 +1658,7 @@ DEFINE_SHOW_ATTRIBUTE(asic_mode);
static int device_id_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
int id;
mutex_lock(&docg3->cascade->lock);
@@ -1672,7 +1672,7 @@ DEFINE_SHOW_ATTRIBUTE(device_id);
static int protection_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
mutex_lock(&docg3->cascade->lock);
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 3a6ea7a6a30c..d533475fda15 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -15,7 +15,7 @@
#include <linux/sizes.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#define MAX_CMD_SIZE 4
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index 40cd5041174c..f576e6a890e8 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -22,7 +22,7 @@
#include <linux/sizes.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
struct mchp48_caps {
unsigned int size;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 1d3b2a94581f..0c1b93303618 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index cc17133be297..0a35e5236ae5 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -937,7 +937,6 @@ static int spear_smi_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct spear_smi_plat_data *pdata = NULL;
struct spear_smi *dev;
- struct resource *smi_base;
int irq, ret = 0;
int i;
@@ -975,9 +974,7 @@ static int spear_smi_probe(struct platform_device *pdev)
goto err;
}
- smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dev->io_base = devm_ioremap_resource(&pdev->dev, smi_base);
+ dev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->io_base)) {
ret = PTR_ERR(dev->io_base);
goto err;
@@ -996,21 +993,17 @@ static int spear_smi_probe(struct platform_device *pdev)
dev->num_flashes = MAX_NUM_FLASH_CHIP;
}
- dev->clk = devm_clk_get(&pdev->dev, NULL);
+ dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dev->clk)) {
ret = PTR_ERR(dev->clk);
goto err;
}
- ret = clk_prepare_enable(dev->clk);
- if (ret)
- goto err;
-
ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
pdev->name, dev);
if (ret) {
dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
- goto err_irq;
+ goto err;
}
mutex_init(&dev->lock);
@@ -1023,14 +1016,11 @@ static int spear_smi_probe(struct platform_device *pdev)
ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
if (ret) {
dev_err(&dev->pdev->dev, "bank setup failed\n");
- goto err_irq;
+ goto err;
}
}
return 0;
-
-err_irq:
- clk_disable_unprepare(dev->clk);
err:
return ret;
}
@@ -1059,8 +1049,6 @@ static int spear_smi_remove(struct platform_device *pdev)
WARN_ON(mtd_device_unregister(&flash->mtd));
}
- clk_disable_unprepare(dev->clk);
-
return 0;
}
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 3dbb1aa80bfa..95530cbbb1e0 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2016,7 +2016,6 @@ static int stfsm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct flash_info *info;
- struct resource *res;
struct stfsm *fsm;
int ret;
@@ -2033,18 +2032,9 @@ static int stfsm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsm);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Resource not found\n");
- return -ENODEV;
- }
-
- fsm->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fsm->base)) {
- dev_err(&pdev->dev,
- "Failed to reserve memory region %pR\n", res);
+ fsm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fsm->base))
return PTR_ERR(fsm->base);
- }
fsm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(fsm->clk)) {
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index e71af4c49096..f4e5174b2449 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -412,7 +412,6 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
struct map_info *map;
struct mtd_info *mtd;
struct resource *add_range;
- struct resource *control_regs;
struct pcm_int_data *pcm_data;
/* Allocate memory control_regs data structures */
@@ -452,8 +451,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
simple_map_init(map); /* fill with default methods */
- control_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pcm_data->ctl_regs = devm_ioremap_resource(&pdev->dev, control_regs);
+ pcm_data->ctl_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pcm_data->ctl_regs))
return PTR_ERR(pcm_data->ctl_regs);
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 67a1dbfdd72c..a1da1c8973c0 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -118,11 +118,9 @@ ltq_mtd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ltq_mtd);
- ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -ENOENT;
- }
+ ltq_mtd->map->virt = devm_platform_get_and_ioremap_resource(pdev, 0, &ltq_mtd->res);
+ if (IS_ERR(ltq_mtd->map->virt))
+ return PTR_ERR(ltq_mtd->map->virt);
ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
GFP_KERNEL);
@@ -131,9 +129,6 @@ ltq_mtd_probe(struct platform_device *pdev)
ltq_mtd->map->phys = ltq_mtd->res->start;
ltq_mtd->map->size = resource_size(ltq_mtd->res);
- ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
- if (IS_ERR(ltq_mtd->map->virt))
- return PTR_ERR(ltq_mtd->map->virt);
ltq_mtd->map->name = ltq_map_name;
ltq_mtd->map->bankwidth = 2;
diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
index 58782cfaf71c..60dccc48f99e 100644
--- a/drivers/mtd/maps/physmap-bt1-rom.c
+++ b/drivers/mtd/maps/physmap-bt1-rom.c
@@ -14,7 +14,6 @@
#include <linux/mtd/xip.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/types.h>
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index c73854da5136..78710fbc8e7f 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -508,8 +508,7 @@ static int physmap_flash_probe(struct platform_device *dev)
for (i = 0; i < info->nmaps; i++) {
struct resource *res;
- res = platform_get_resource(dev, IORESOURCE_MEM, i);
- info->maps[i].virt = devm_ioremap_resource(&dev->dev, res);
+ info->maps[i].virt = devm_platform_get_and_ioremap_resource(dev, i, &res);
if (IS_ERR(info->maps[i].virt)) {
err = PTR_ERR(info->maps[i].virt);
goto err_out;
diff --git a/drivers/mtd/maps/physmap-gemini.c b/drivers/mtd/maps/physmap-gemini.c
index d4a46e159d38..9d3b4bf84a1a 100644
--- a/drivers/mtd/maps/physmap-gemini.c
+++ b/drivers/mtd/maps/physmap-gemini.c
@@ -8,10 +8,10 @@
*/
#include <linux/export.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/mtd/maps/physmap-ixp4xx.c b/drivers/mtd/maps/physmap-ixp4xx.c
index 6a054229a8a0..c561468f95f6 100644
--- a/drivers/mtd/maps/physmap-ixp4xx.c
+++ b/drivers/mtd/maps/physmap-ixp4xx.c
@@ -11,7 +11,7 @@
*/
#include <linux/export.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
#include "physmap-ixp4xx.h"
diff --git a/drivers/mtd/maps/physmap-ixp4xx.h b/drivers/mtd/maps/physmap-ixp4xx.h
index b0fc49b7f3ed..46824c57e58a 100644
--- a/drivers/mtd/maps/physmap-ixp4xx.h
+++ b/drivers/mtd/maps/physmap-ixp4xx.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/mtd/map.h>
#ifdef CONFIG_MTD_PHYSMAP_IXP4XX
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
index a1b8b7b25f88..2e779111bf79 100644
--- a/drivers/mtd/maps/physmap-versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -9,9 +9,9 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/mtd/map.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include "physmap-versatile.h"
@@ -206,7 +206,7 @@ int of_flash_probe_versatile(struct platform_device *pdev,
if (!sysnp)
return -ENODEV;
- versatile_flashprot = (enum versatile_flashprot)devid->data;
+ versatile_flashprot = (uintptr_t)devid->data;
rmap = syscon_node_to_regmap(sysnp);
of_node_put(sysnp);
if (IS_ERR(rmap))
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index cedd8ef9a6bf..4c921dce7396 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -123,8 +123,7 @@ static int platram_probe(struct platform_device *pdev)
info->pdata = pdata;
/* get the resource for the memory mapping */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->map.virt = devm_ioremap_resource(&pdev->dev, res);
+ info->map.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(info->map.virt)) {
err = PTR_ERR(info->map.virt);
goto exit_free;
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 860b19f77090..2bfdf1b7e18a 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index fa476fb4dffb..9751416c2a91 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -262,7 +262,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
}
if (mtd_type_is_nand(mbd->mtd))
- pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
mbd->tr->name, mbd->mtd->name);
/* OK, it's not open. Create cache info for it */
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 66ffc9f1ead2..ef6299af60e4 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -49,7 +49,7 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
dev->readonly = 1;
if (mtd_type_is_nand(mtd))
- pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
tr->name, mtd->name);
if (add_mtd_blktrans_dev(dev))
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e00b12aa5ec9..9bd661be3ae9 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -93,10 +93,39 @@ static void mtd_release(struct device *dev)
struct mtd_info *mtd = dev_get_drvdata(dev);
dev_t index = MTD_DEVT(mtd->index);
+ idr_remove(&mtd_idr, mtd->index);
+ of_node_put(mtd_get_of_node(mtd));
+
+ if (mtd_is_partition(mtd))
+ release_mtd_partition(mtd);
+
/* remove /dev/mtdXro node */
device_destroy(&mtd_class, index + 1);
}
+static void mtd_device_release(struct kref *kref)
+{
+ struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
+ bool is_partition = mtd_is_partition(mtd);
+
+ debugfs_remove_recursive(mtd->dbg.dfs_dir);
+
+ /* Try to remove the NVMEM provider */
+ nvmem_unregister(mtd->nvmem);
+
+ device_unregister(&mtd->dev);
+
+ /*
+ * Clear dev so mtd can be safely re-registered later if desired.
+ * Should not be done for a partition,
+ * as it was already destroyed in device_unregister().
+ */
+ if (!is_partition)
+ memset(&mtd->dev, 0, sizeof(mtd->dev));
+
+ module_put(THIS_MODULE);
+}
+
#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
@@ -666,7 +695,7 @@ int add_mtd_device(struct mtd_info *mtd)
}
mtd->index = i;
- mtd->usecount = 0;
+ kref_init(&mtd->refcnt);
/* default value if not set by driver */
if (mtd->bitflip_threshold == 0)
@@ -779,7 +808,6 @@ int del_mtd_device(struct mtd_info *mtd)
{
int ret;
struct mtd_notifier *not;
- struct device_node *mtd_of_node;
mutex_lock(&mtd_table_mutex);
@@ -793,28 +821,8 @@ int del_mtd_device(struct mtd_info *mtd)
list_for_each_entry(not, &mtd_notifiers, list)
not->remove(mtd);
- if (mtd->usecount) {
- printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
- mtd->index, mtd->name, mtd->usecount);
- ret = -EBUSY;
- } else {
- mtd_of_node = mtd_get_of_node(mtd);
- debugfs_remove_recursive(mtd->dbg.dfs_dir);
-
- /* Try to remove the NVMEM provider */
- nvmem_unregister(mtd->nvmem);
-
- device_unregister(&mtd->dev);
-
- /* Clear dev so mtd can be safely re-registered later if desired */
- memset(&mtd->dev, 0, sizeof(mtd->dev));
-
- idr_remove(&mtd_idr, mtd->index);
- of_node_put(mtd_of_node);
-
- module_put(THIS_MODULE);
- ret = 0;
- }
+ kref_put(&mtd->refcnt, mtd_device_release);
+ ret = 0;
out_error:
mutex_unlock(&mtd_table_mutex);
@@ -1227,25 +1235,27 @@ int __get_mtd_device(struct mtd_info *mtd)
struct mtd_info *master = mtd_get_master(mtd);
int err;
- if (!try_module_get(master->owner))
- return -ENODEV;
-
if (master->_get_device) {
err = master->_get_device(mtd);
-
- if (err) {
- module_put(master->owner);
+ if (err)
return err;
- }
}
- master->usecount++;
+ if (!try_module_get(master->owner)) {
+ if (master->_put_device)
+ master->_put_device(master);
+ return -ENODEV;
+ }
- while (mtd->parent) {
- mtd->usecount++;
+ while (mtd) {
+ if (mtd != master)
+ kref_get(&mtd->refcnt);
mtd = mtd->parent;
}
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_get(&master->refcnt);
+
return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -1329,18 +1339,23 @@ void __put_mtd_device(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
- while (mtd->parent) {
- --mtd->usecount;
- BUG_ON(mtd->usecount < 0);
- mtd = mtd->parent;
+ while (mtd) {
+ /* kref_put() can release mtd, so keep a reference to mtd->parent */
+ struct mtd_info *parent = mtd->parent;
+
+ if (mtd != master)
+ kref_put(&mtd->refcnt, mtd_device_release);
+ mtd = parent;
}
- master->usecount--;
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_put(&master->refcnt, mtd_device_release);
+
+ module_put(master->owner);
+ /* must be the last as master can be freed in the _put_device */
if (master->_put_device)
master->_put_device(master);
-
- module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index b5eefeabf310..b014861a06a6 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -12,6 +12,7 @@ int __must_check add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
+void release_mtd_partition(struct mtd_info *mtd);
struct mtd_partitions;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a46affbb037d..23483db8f30c 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -32,6 +32,12 @@ static inline void free_partition(struct mtd_info *mtd)
kfree(mtd);
}
+void release_mtd_partition(struct mtd_info *mtd)
+{
+ WARN_ON(!list_empty(&mtd->part.node));
+ free_partition(mtd);
+}
+
static struct mtd_info *allocate_partition(struct mtd_info *parent,
const struct mtd_partition *part,
int partno, uint64_t cur_offset)
@@ -309,13 +315,11 @@ static int __mtd_del_partition(struct mtd_info *mtd)
sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);
+ list_del_init(&mtd->part.node);
err = del_mtd_device(mtd);
if (err)
return err;
- list_del(&mtd->part.node);
- free_partition(mtd);
-
return 0;
}
@@ -333,6 +337,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
__del_mtd_partitions(child);
pr_info("Deleting %s MTD partition\n", child->name);
+ list_del_init(&child->part.node);
ret = del_mtd_device(child);
if (ret < 0) {
pr_err("Error when deleting partition \"%s\" (%d)\n",
@@ -340,9 +345,6 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
err = ret;
continue;
}
-
- list_del(&child->part.node);
- free_partition(child);
}
return err;
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
index 22a760e6024e..47e10945b8d2 100644
--- a/drivers/mtd/nand/ecc-mxic.c
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -18,7 +18,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c
index 5250764cedee..8f996e8d61b8 100644
--- a/drivers/mtd/nand/ecc.c
+++ b/drivers/mtd/nand/ecc.c
@@ -95,9 +95,9 @@
#include <linux/module.h>
#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_platform.h>
static LIST_HEAD(on_host_hw_engines);
diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
index ff7af98604df..a12f8f3efd07 100644
--- a/drivers/mtd/nand/onenand/onenand_omap2.c
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
@@ -13,7 +13,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -467,12 +467,6 @@ static int omap2_onenand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "error getting memory resource\n");
- return -EINVAL;
- }
-
r = of_property_read_u32(np, "reg", &val);
if (r) {
dev_err(dev, "reg not found in DT\n");
@@ -486,11 +480,11 @@ static int omap2_onenand_probe(struct platform_device *pdev)
init_completion(&c->irq_done);
init_completion(&c->dma_done);
c->gpmc_cs = val;
- c->phys_base = res->start;
- c->onenand.base = devm_ioremap_resource(dev, res);
+ c->onenand.base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(c->onenand.base))
return PTR_ERR(c->onenand.base);
+ c->phys_base = res->start;
c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
if (IS_ERR(c->int_gpiod)) {
diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c
index 92151aa52964..fd6890a03d55 100644
--- a/drivers/mtd/nand/onenand/onenand_samsung.c
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -860,8 +860,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
s3c_onenand_setup(mtd);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- onenand->base = devm_ioremap_resource(&pdev->dev, r);
+ onenand->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(onenand->base))
return PTR_ERR(onenand->base);
@@ -874,8 +873,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
this->options |= ONENAND_SKIP_UNLOCK_CHECK;
if (onenand->type != TYPE_S5PC110) {
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
+ onenand->ahb_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(onenand->ahb_addr))
return PTR_ERR(onenand->ahb_addr);
@@ -895,8 +893,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
this->subpagesize = mtd->writesize;
} else { /* S5PC110 */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
+ onenand->dma_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(onenand->dma_addr))
return PTR_ERR(onenand->dma_addr);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index b523354dfb00..cbf8ae85e1ae 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -160,7 +160,7 @@ config MTD_NAND_MARVELL
including:
- PXA3xx processors (NFCv1)
- 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
- - 64-bit Aramda platforms (7k, 8k) (NFCv2)
+ - 64-bit Armada platforms (7k, 8k, ac5) (NFCv2)
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC NAND controller"
@@ -204,13 +204,6 @@ config MTD_NAND_BCM47XXNFLASH
registered by bcma as platform devices. This enables driver for
NAND flash memories. For now only BCM4706 is supported.
-config MTD_NAND_OXNAS
- tristate "Oxford Semiconductor NAND controller"
- depends on ARCH_OXNAS || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables the NAND flash controller on Oxford Semiconductor SoCs.
-
config MTD_NAND_MPC5121_NFC
tristate "MPC5121 NAND controller"
depends on PPC_MPC512x
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index d93e861d8ba7..25120a4afada 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
-obj-$(CONFIG_MTD_NAND_OXNAS) += oxnas_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index fa621ffa6490..919816a7aca7 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -22,7 +22,7 @@
#include <linux/mtd/nand-gpio.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 906eef70cb6d..4621ec549cc7 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -1440,45 +1440,29 @@ static int anfc_probe(struct platform_device *pdev)
anfc_reset(nfc);
- nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
+ nfc->controller_clk = devm_clk_get_enabled(&pdev->dev, "controller");
if (IS_ERR(nfc->controller_clk))
return PTR_ERR(nfc->controller_clk);
- nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ nfc->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
if (IS_ERR(nfc->bus_clk))
return PTR_ERR(nfc->bus_clk);
- ret = clk_prepare_enable(nfc->controller_clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(nfc->bus_clk);
- if (ret)
- goto disable_controller_clk;
-
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
- goto disable_bus_clk;
+ return ret;
ret = anfc_parse_cs(nfc);
if (ret)
- goto disable_bus_clk;
+ return ret;
ret = anfc_chips_init(nfc);
if (ret)
- goto disable_bus_clk;
+ return ret;
platform_set_drvdata(pdev, nfc);
return 0;
-
-disable_bus_clk:
- clk_disable_unprepare(nfc->bus_clk);
-
-disable_controller_clk:
- clk_disable_unprepare(nfc->controller_clk);
-
- return ret;
}
static void anfc_remove(struct platform_device *pdev)
@@ -1486,9 +1470,6 @@ static void anfc_remove(struct platform_device *pdev)
struct arasan_nfc *nfc = platform_get_drvdata(pdev);
anfc_chips_cleanup(nfc);
-
- clk_disable_unprepare(nfc->bus_clk);
- clk_disable_unprepare(nfc->controller_clk);
}
static const struct of_device_id anfc_ids[] = {
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 81e3d682a8cd..3f494f7c7ecb 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1791,8 +1791,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
nand->numcs = 1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
+ nand->cs[0].io.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(nand->cs[0].io.virt))
return PTR_ERR(nand->cs[0].io.virt);
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
index 71ddcc611f6e..9596629000f4 100644
--- a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
@@ -61,15 +61,13 @@ static int bcm63138_nand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct bcm63138_nand_soc *priv;
struct brcmnand_soc *soc;
- struct resource *res;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
soc = &priv->soc;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-int-base");
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 2e9c2e2d9c9f..440bef477930 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -272,6 +272,7 @@ struct brcmnand_controller {
const unsigned int *page_sizes;
unsigned int page_size_shift;
unsigned int max_oob;
+ u32 ecc_level_shift;
u32 features;
/* for low-power standby/resume only */
@@ -596,6 +597,34 @@ enum {
INTFC_CTLR_READY = BIT(31),
};
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant throughout hardware revision, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+ /* See BRCMNAND_HAS_CACHE_MODE */
+ ACC_CONTROL_CACHE_MODE = BIT(22),
+
+ /* See BRCMNAND_HAS_PREFETCH */
+ ACC_CONTROL_PREFETCH = BIT(23),
+
+ ACC_CONTROL_PAGE_HIT = BIT(24),
+ ACC_CONTROL_WR_PREEMPT = BIT(25),
+ ACC_CONTROL_PARTIAL_PAGE = BIT(26),
+ ACC_CONTROL_RD_ERASED = BIT(27),
+ ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
+ ACC_CONTROL_WR_ECC = BIT(30),
+ ACC_CONTROL_RD_ECC = BIT(31),
+};
+
+#define ACC_CONTROL_ECC_SHIFT 16
+/* Only for v7.2 */
+#define ACC_CONTROL_ECC_EXT_SHIFT 13
+
static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
@@ -737,6 +766,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
ctrl->features |= BRCMNAND_HAS_WP;
+ /* v7.2 has different ecc level shift in the acc register */
+ if (ctrl->nand_version == 0x0702)
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
+ else
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
+
return 0;
}
@@ -931,30 +966,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
return 0;
}
-/***********************************************************************
- * NAND ACC CONTROL bitfield
- *
- * Some bits have remained constant throughout hardware revision, while
- * others have shifted around.
- ***********************************************************************/
-
-/* Constant for all versions (where supported) */
-enum {
- /* See BRCMNAND_HAS_CACHE_MODE */
- ACC_CONTROL_CACHE_MODE = BIT(22),
-
- /* See BRCMNAND_HAS_PREFETCH */
- ACC_CONTROL_PREFETCH = BIT(23),
-
- ACC_CONTROL_PAGE_HIT = BIT(24),
- ACC_CONTROL_WR_PREEMPT = BIT(25),
- ACC_CONTROL_PARTIAL_PAGE = BIT(26),
- ACC_CONTROL_RD_ERASED = BIT(27),
- ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
- ACC_CONTROL_WR_ECC = BIT(30),
- ACC_CONTROL_RD_ECC = BIT(31),
-};
-
static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
if (ctrl->nand_version == 0x0702)
@@ -967,18 +978,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
return GENMASK(4, 0);
}
-#define NAND_ACC_CONTROL_ECC_SHIFT 16
-#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
-
static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
- mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+ mask <<= ACC_CONTROL_ECC_SHIFT;
/* v7.2 includes additional ECC levels */
- if (ctrl->nand_version >= 0x0702)
- mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+ if (ctrl->nand_version == 0x0702)
+ mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
return mask;
}
@@ -992,8 +1000,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
if (en) {
acc_control |= ecc_flags; /* enable RD/WR ECC */
- acc_control |= host->hwcfg.ecc_level
- << NAND_ACC_CONTROL_ECC_SHIFT;
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
} else {
acc_control &= ~ecc_flags; /* disable RD/WR ECC */
acc_control &= ~brcmnand_ecc_level_mask(ctrl);
@@ -1072,6 +1080,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
cpu_relax();
} while (time_after(limit, jiffies));
+ /*
+ * do a final check after the timeout in case the CPU was busy and the driver
+ * did not get enough time to perform the polling, to avoid false alarms
+ */
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
expected_val, val & mask);
@@ -1461,19 +1477,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
const u8 *oob, int sas, int sector_1k)
{
int tbytes = sas << sector_1k;
- int j;
+ int j, k = 0;
+ u32 last = 0xffffffff;
+ u8 *plast = (u8 *)&last;
/* Adjust OOB values for 1K sector size */
if (sector_1k && (i & 0x01))
tbytes = max(0, tbytes - (int)ctrl->max_oob);
tbytes = min_t(int, tbytes, ctrl->max_oob);
- for (j = 0; j < tbytes; j += 4)
+ /*
+ * tbytes may not be a multiple of words. Make sure we don't read out of
+ * the boundary and stop at the last word.
+ */
+ for (j = 0; (j + 3) < tbytes; j += 4)
oob_reg_write(ctrl, j,
(oob[j + 0] << 24) |
(oob[j + 1] << 16) |
(oob[j + 2] << 8) |
(oob[j + 3] << 0));
+
+ /* handle the remaining bytes */
+ while (j < tbytes)
+ plast[k++] = oob[j++];
+
+ if (tbytes & 0x3)
+ oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
+
return tbytes;
}
@@ -1592,7 +1622,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
- BUG_ON(ctrl->cmd_pending != 0);
+ /*
+ * If we came here through _panic_write and there is a pending
+ * command, try to wait for it. If it times out, rather than
+ * hitting BUG_ON, just return so we don't crash while crashing.
+ */
+ if (oops_in_progress) {
+ if (ctrl->cmd_pending &&
+ bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+ return;
+ } else
+ BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
@@ -1626,13 +1666,13 @@ static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
disable_ctrl_irqs(ctrl);
sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
NAND_CTRL_RDY, 0);
- err = (sts < 0) ? true : false;
+ err = sts < 0;
} else {
unsigned long timeo = msecs_to_jiffies(
NAND_POLL_STATUS_TIMEOUT_MS);
/* wait for completion interrupt */
sts = wait_for_completion_timeout(&ctrl->done, timeo);
- err = (sts <= 0) ? true : false;
+ err = !sts;
}
return err;
@@ -1648,6 +1688,7 @@ static int brcmnand_waitfunc(struct nand_chip *chip)
if (ctrl->cmd_pending)
err = brcmstb_nand_wait_for_completion(chip);
+ ctrl->cmd_pending = 0;
if (err) {
u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
>> brcmnand_cmd_shift(ctrl);
@@ -1656,8 +1697,8 @@ static int brcmnand_waitfunc(struct nand_chip *chip)
"timeout waiting for command %#02x\n", cmd);
dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
+ return -ETIMEDOUT;
}
- ctrl->cmd_pending = 0;
return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
INTFC_FLASH_STATUS;
}
@@ -2561,7 +2602,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
tmp &= ~brcmnand_ecc_level_mask(ctrl);
tmp &= ~brcmnand_spare_area_mask(ctrl);
if (ctrl->nand_version >= 0x0302) {
- tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+ tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
tmp |= cfg->spare_area_size;
}
nand_writereg(ctrl, acc_control_offs, tmp);
@@ -2612,6 +2653,8 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
struct nand_chip *chip = &host->chip;
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
+ struct nand_memory_organization *memorg =
+ nanddev_get_memorg(&chip->base);
struct brcmnand_controller *ctrl = host->ctrl;
struct brcmnand_cfg *cfg = &host->hwcfg;
char msg[128];
@@ -2633,10 +2676,11 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
if (cfg->spare_area_size > ctrl->max_oob)
cfg->spare_area_size = ctrl->max_oob;
/*
- * Set oobsize to be consistent with controller's spare_area_size, as
- * the rest is inaccessible.
+ * Set mtd and memorg oobsize to be consistent with controller's
+ * spare_area_size, as the rest is inaccessible.
*/
mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
+ memorg->oobsize = mtd->oobsize;
cfg->device_size = mtd->size;
cfg->block_size = mtd->erasesize;
@@ -3202,6 +3246,10 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
ret = brcmnand_init_cs(host, NULL);
if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
+ goto err;
+ }
devm_kfree(dev, host);
continue; /* Try all chip-selects */
}
diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
index d32950847a62..089c70fc6edf 100644
--- a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
@@ -103,7 +103,6 @@ static int iproc_nand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct iproc_nand_soc *priv;
struct brcmnand_soc *soc;
- struct resource *res;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -112,13 +111,11 @@ static int iproc_nand_probe(struct platform_device *pdev)
spin_lock_init(&priv->idm_lock);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-idm");
- priv->idm_base = devm_ioremap_resource(dev, res);
+ priv->idm_base = devm_platform_ioremap_resource_byname(pdev, "iproc-idm");
if (IS_ERR(priv->idm_base))
return PTR_ERR(priv->idm_base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-ext");
- priv->ext_base = devm_ioremap_resource(dev, res);
+ priv->ext_base = devm_platform_ioremap_resource_byname(pdev, "iproc-ext");
if (IS_ERR(priv->ext_base))
return PTR_ERR(priv->ext_base);
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 415d6aaa8255..e75d81cf8c21 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -18,7 +18,6 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_data/mtd-davinci.h>
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 915047e3fbc2..edac8749bb93 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index fa537fee6701..20bb1e0cb5eb 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 7366e85c09fd..315e9d2b573d 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -13,7 +13,8 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtd.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/fsl_lbc.h>
@@ -172,8 +173,7 @@ static int fun_probe(struct platform_device *ofdev)
if (!fun)
return -ENOMEM;
- io_res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- fun->io_base = devm_ioremap_resource(&ofdev->dev, io_res);
+ fun->io_base = devm_platform_get_and_ioremap_resource(ofdev, 0, &io_res);
if (IS_ERR(fun->io_base))
return PTR_ERR(fun->io_base);
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 7b4742420dfc..811982da3557 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -1066,16 +1066,12 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->regs_va = base + FSMC_NOR_REG_SIZE +
(host->bank * FSMC_NAND_BANK_SZ);
- host->clk = devm_clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "failed to fetch block clock\n");
return PTR_ERR(host->clk);
}
- ret = clk_prepare_enable(host->clk);
- if (ret)
- return ret;
-
/*
* This device ID is actually a common AMBA ID as used on the
* AMBA PrimeCell bus. However it is not a PrimeCell.
@@ -1111,7 +1107,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (!host->read_dma_chan) {
dev_err(&pdev->dev, "Unable to get read dma channel\n");
ret = -ENODEV;
- goto disable_clk;
+ goto disable_fsmc;
}
host->write_dma_chan = dma_request_channel(mask, filter, NULL);
if (!host->write_dma_chan) {
@@ -1155,9 +1151,8 @@ release_dma_write_chan:
release_dma_read_chan:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->read_dma_chan);
-disable_clk:
+disable_fsmc:
fsmc_nand_disable(host);
- clk_disable_unprepare(host->clk);
return ret;
}
@@ -1182,7 +1177,6 @@ static void fsmc_nand_remove(struct platform_device *pdev)
dma_release_channel(host->write_dma_chan);
dma_release_channel(host->read_dma_chan);
}
- clk_disable_unprepare(host->clk);
}
}
@@ -1200,9 +1194,14 @@ static int fsmc_nand_suspend(struct device *dev)
static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ int ret;
if (host) {
- clk_prepare_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
if (host->dev_timings)
fsmc_nand_setup(host, host->dev_timings);
nand_reset(&host->nand, 0);
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 500e7a28d2e4..e71ad2fcec23 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma/mxs-dma.h>
#include "gpmi-nand.h"
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index 9054559e52dd..525c34c281b6 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index b9f135297aa0..6748226b8bd1 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index a9909eb08124..cb5d88f42297 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -626,16 +626,10 @@ static int ebu_nand_probe(struct platform_device *pdev)
goto err_of_node_put;
}
- ebu_host->clk = devm_clk_get(dev, NULL);
+ ebu_host->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(ebu_host->clk)) {
ret = dev_err_probe(dev, PTR_ERR(ebu_host->clk),
- "failed to get clock\n");
- goto err_of_node_put;
- }
-
- ret = clk_prepare_enable(ebu_host->clk);
- if (ret) {
- dev_err(dev, "failed to enable clock: %d\n", ret);
+ "failed to get and enable clock\n");
goto err_of_node_put;
}
@@ -643,7 +637,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
if (IS_ERR(ebu_host->dma_tx)) {
ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
"failed to request DMA tx chan!.\n");
- goto err_disable_unprepare_clk;
+ goto err_of_node_put;
}
ebu_host->dma_rx = dma_request_chan(dev, "rx");
@@ -698,8 +692,6 @@ err_clean_nand:
nand_cleanup(&ebu_host->chip);
err_cleanup_dma:
ebu_dma_cleanup(ebu_host);
-err_disable_unprepare_clk:
- clk_disable_unprepare(ebu_host->clk);
err_of_node_put:
of_node_put(chip_np);
@@ -716,7 +708,6 @@ static void ebu_nand_remove(struct platform_device *pdev)
nand_cleanup(&ebu_host->chip);
ebu_nand_disable(&ebu_host->chip);
ebu_dma_cleanup(ebu_host);
- clk_disable_unprepare(ebu_host->clk);
}
static const struct of_device_id ebu_nand_match[] = {
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index b3136ae6f4e9..488fd452611a 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -695,8 +695,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->pdev = pdev;
- rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 3139b6107660..1c5fa855b9f2 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -836,8 +836,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
if (!host)
return -ENOMEM;
- rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
@@ -872,15 +871,12 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
/* Get NAND clock */
- host->clk = devm_clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "Clock failure\n");
res = -ENOENT;
goto enable_wp;
}
- res = clk_prepare_enable(host->clk);
- if (res)
- goto enable_wp;
/* Set NAND IO addresses and command/ready functions */
chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
@@ -908,13 +904,13 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
GFP_KERNEL);
if (host->data_buf == NULL) {
res = -ENOMEM;
- goto unprepare_clk;
+ goto enable_wp;
}
res = lpc32xx_nand_dma_setup(host);
if (res) {
res = -EIO;
- goto unprepare_clk;
+ goto enable_wp;
}
/* Find NAND device */
@@ -935,8 +931,6 @@ cleanup_nand:
nand_cleanup(chip);
release_dma:
dma_release_channel(host->dma_chan);
-unprepare_clk:
- clk_disable_unprepare(host->clk);
enable_wp:
lpc32xx_wp_enable(host);
@@ -963,7 +957,6 @@ static void lpc32xx_nand_remove(struct platform_device *pdev)
tmp &= ~SLCCFG_CE_LOW;
writel(tmp, SLC_CTRL(host->io_base));
- clk_disable_unprepare(host->clk);
lpc32xx_wp_enable(host);
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 30c15e4e1cc0..2c94da7a3b3a 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -77,9 +77,10 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -375,6 +376,7 @@ static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
* BCH error detection and correction algorithm,
* NDCB3 register has been added
* @use_dma: Use dma for data transfers
+ * @max_mode_number: Maximum timing mode supported by the controller
*/
struct marvell_nfc_caps {
unsigned int max_cs_nb;
@@ -383,6 +385,7 @@ struct marvell_nfc_caps {
bool legacy_of_bindings;
bool is_nfcv2;
bool use_dma;
+ unsigned int max_mode_number;
};
/**
@@ -2376,6 +2379,9 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
if (IS_ERR(sdr))
return PTR_ERR(sdr);
+ if (nfc->caps->max_mode_number && nfc->caps->max_mode_number < conf->timings.mode)
+ return -EOPNOTSUPP;
+
/*
* SDR timings are given in pico-seconds while NFC timings must be
* expressed in NAND controller clock cycles, which is half of the
@@ -3073,6 +3079,13 @@ static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
.is_nfcv2 = true,
};
+static const struct marvell_nfc_caps marvell_ac5_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .is_nfcv2 = true,
+ .max_mode_number = 3,
+};
+
static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
.max_cs_nb = 4,
.max_rb_nb = 2,
@@ -3122,6 +3135,10 @@ static const struct of_device_id marvell_nfc_of_ids[] = {
.data = &marvell_armada_8k_nfc_caps,
},
{
+ .compatible = "marvell,ac5-nand-controller",
+ .data = &marvell_ac5_caps,
+ },
+ {
.compatible = "marvell,armada370-nand-controller",
.data = &marvell_armada370_nfc_caps,
},
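
The new max_mode_number field caps the ONFI SDR timing mode that ->setup_interface() will accept on AC5. A stripped-down sketch of that kind of cap check, assuming a hypothetical demo_nfc controller struct that carries a caps pointer like the one added above:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mtd/rawnand.h>

struct demo_nfc_caps {
	unsigned int max_mode_number;	/* 0 means "no cap" */
};

struct demo_nfc {
	const struct demo_nfc_caps *caps;
};

static int demo_nfc_setup_interface(struct nand_chip *chip, int chipnr,
				    const struct nand_interface_config *conf)
{
	struct demo_nfc *nfc = nand_get_controller_data(chip);
	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/*
	 * Refuse timing modes the controller cannot drive; the core then
	 * falls back to the fastest mode that is still accepted.
	 */
	if (nfc->caps->max_mode_number &&
	    nfc->caps->max_mode_number < conf->timings.mode)
		return -EOPNOTSUPP;

	/* ... translate *sdr into controller clock cycles and program it ... */
	return 0;
}
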
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index b10011dec1e6..25e3c1cb605e 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/sched/task_stack.h>
#define NFC_REG_CMD 0x00
@@ -135,6 +134,7 @@ struct meson_nfc_nand_chip {
struct meson_nand_ecc {
u32 bch;
u32 strength;
+ u32 size;
};
struct meson_nfc_data {
@@ -190,7 +190,8 @@ struct meson_nfc {
};
enum {
- NFC_ECC_BCH8_1K = 2,
+ NFC_ECC_BCH8_512 = 1,
+ NFC_ECC_BCH8_1K,
NFC_ECC_BCH24_1K,
NFC_ECC_BCH30_1K,
NFC_ECC_BCH40_1K,
@@ -198,15 +199,16 @@ enum {
NFC_ECC_BCH60_1K,
};
-#define MESON_ECC_DATA(b, s) { .bch = (b), .strength = (s)}
+#define MESON_ECC_DATA(b, s, sz) { .bch = (b), .strength = (s), .size = (sz) }
static struct meson_nand_ecc meson_ecc[] = {
- MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8),
- MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24),
- MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30),
- MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40),
- MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50),
- MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60),
+ MESON_ECC_DATA(NFC_ECC_BCH8_512, 8, 512),
+ MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60, 1024),
};
static int meson_nand_calc_ecc_bytes(int step_size, int strength)
@@ -224,8 +226,27 @@ static int meson_nand_calc_ecc_bytes(int step_size, int strength)
NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps,
meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60);
-NAND_ECC_CAPS_SINGLE(meson_axg_ecc_caps,
- meson_nand_calc_ecc_bytes, 1024, 8);
+
+static const int axg_stepinfo_strengths[] = { 8 };
+
+static const struct nand_ecc_step_info axg_stepinfo[] = {
+ {
+ .stepsize = 1024,
+ .strengths = axg_stepinfo_strengths,
+ .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
+ },
+ {
+ .stepsize = 512,
+ .strengths = axg_stepinfo_strengths,
+ .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
+ },
+};
+
+static const struct nand_ecc_caps meson_axg_ecc_caps = {
+ .stepinfos = axg_stepinfo,
+ .nstepinfos = ARRAY_SIZE(axg_stepinfo),
+ .calc_ecc_bytes = meson_nand_calc_ecc_bytes,
+};
static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand)
{
@@ -400,9 +421,10 @@ static void meson_nfc_set_data_oob(struct nand_chip *nand,
}
}
-static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms,
+static int meson_nfc_wait_no_rb_pin(struct nand_chip *nand, int timeout_ms,
bool need_cmd_read0)
{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
u32 cmd, cfg;
meson_nfc_cmd_idle(nfc, nfc->timing.twb);
@@ -414,8 +436,7 @@ static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms,
writel(cfg, nfc->reg_base + NFC_REG_CFG);
reinit_completion(&nfc->completion);
- cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_STATUS;
- writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ nand_status_op(nand, NULL);
/* use the max erase time as the maximum clock for waiting R/B */
cmd = NFC_CMD_RB | NFC_CMD_RB_INT_NO_PIN | nfc->timing.tbers_max;
@@ -425,12 +446,8 @@ static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms,
msecs_to_jiffies(timeout_ms)))
return -ETIMEDOUT;
- if (need_cmd_read0) {
- cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_READ0;
- writel(cmd, nfc->reg_base + NFC_REG_CMD);
- meson_nfc_drain_cmd(nfc);
- meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
- }
+ if (need_cmd_read0)
+ nand_exit_status_op(nand);
return 0;
}
@@ -463,9 +480,11 @@ static int meson_nfc_wait_rb_pin(struct meson_nfc *nfc, int timeout_ms)
return ret;
}
-static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms,
+static int meson_nfc_queue_rb(struct nand_chip *nand, int timeout_ms,
bool need_cmd_read0)
{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+
if (nfc->no_rb_pin) {
/* This mode is used when there is no wired R/B pin.
* It works like 'nand_soft_waitrdy()', but instead of
@@ -477,7 +496,7 @@ static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms,
* needed (for all cases except page programming - this
* is reason of 'need_cmd_read0' flag).
*/
- return meson_nfc_wait_no_rb_pin(nfc, timeout_ms,
+ return meson_nfc_wait_no_rb_pin(nand, timeout_ms,
need_cmd_read0);
} else {
return meson_nfc_wait_rb_pin(nfc, timeout_ms);
@@ -687,7 +706,7 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
if (in) {
nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART;
writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD);
- meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tR_max), true);
+ meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tR_max), true);
} else {
meson_nfc_cmd_idle(nfc, nfc->timing.tadl);
}
@@ -733,7 +752,7 @@ static int meson_nfc_write_page_sub(struct nand_chip *nand,
cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
- meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tPROG_max), false);
+ meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tPROG_max), false);
meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE);
@@ -1049,7 +1068,7 @@ static int meson_nfc_exec_op(struct nand_chip *nand,
break;
case NAND_OP_WAITRDY_INSTR:
- meson_nfc_queue_rb(nfc, instr->ctx.waitrdy.timeout_ms,
+ meson_nfc_queue_rb(nand, instr->ctx.waitrdy.timeout_ms,
true);
if (instr->delay_ns)
meson_nfc_cmd_idle(nfc, delay_idle);
@@ -1259,7 +1278,8 @@ static int meson_nand_bch_mode(struct nand_chip *nand)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) {
- if (meson_ecc[i].strength == nand->ecc.strength) {
+ if (meson_ecc[i].strength == nand->ecc.strength &&
+ meson_ecc[i].size == nand->ecc.size) {
meson_chip->bch_mode = meson_ecc[i].bch;
return 0;
}
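
With the meson driver now advertising both 512-byte and 1024-byte ECC steps, the BCH mode lookup has to match on strength and step size together, as the last hunk does. A table-lookup sketch of that selection, using hypothetical demo_* names and made-up selector values:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mtd/rawnand.h>
#include <linux/types.h>

struct demo_nand_ecc {
	u32 bch;	/* controller-specific BCH selector (made up here) */
	u32 strength;	/* correctable bits per ECC step */
	u32 size;	/* ECC step size in bytes */
};

static const struct demo_nand_ecc demo_ecc[] = {
	{ .bch = 1, .strength = 8, .size = 512 },
	{ .bch = 2, .strength = 8, .size = 1024 },
	{ .bch = 3, .strength = 24, .size = 1024 },
};

/* Pick the selector that matches both the negotiated strength and step size. */
static int demo_nand_bch_mode(struct nand_chip *nand, u32 *bch)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_ecc); i++) {
		if (demo_ecc[i].strength == nand->ecc.strength &&
		    demo_ecc[i].size == nand->ecc.size) {
			*bch = demo_ecc[i].bch;
			return 0;
		}
	}

	return -EINVAL;
}
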
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index ab05ee65702c..215610f808f1 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -21,10 +21,10 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/mpc5121.h>
@@ -595,8 +595,6 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
- clk_disable_unprepare(prv->clk);
-
if (prv->csreg)
iounmap(prv->csreg);
}
@@ -717,17 +715,12 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
/* Enable NFC clock */
- clk = devm_clk_get(dev, "ipg");
+ clk = devm_clk_get_enabled(dev, "ipg");
if (IS_ERR(clk)) {
- dev_err(dev, "Unable to acquire NFC clock!\n");
+ dev_err(dev, "Unable to acquire and enable NFC clock!\n");
retval = PTR_ERR(clk);
goto error;
}
- retval = clk_prepare_enable(clk);
- if (retval) {
- dev_err(dev, "Unable to enable NFC clock!\n");
- goto error;
- }
prv->clk = clk;
/* Reset NAND Flash controller */
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index b2fa6b2074ab..29c8bddde67f 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/nand-ecc-mtk.h>
/* NAND controller register definition */
@@ -1119,32 +1118,6 @@ static irqreturn_t mtk_nfc_irq(int irq, void *id)
return IRQ_HANDLED;
}
-static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
-{
- int ret;
-
- ret = clk_prepare_enable(clk->nfi_clk);
- if (ret) {
- dev_err(dev, "failed to enable nfi clk\n");
- return ret;
- }
-
- ret = clk_prepare_enable(clk->pad_clk);
- if (ret) {
- dev_err(dev, "failed to enable pad clk\n");
- clk_disable_unprepare(clk->nfi_clk);
- return ret;
- }
-
- return 0;
-}
-
-static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
-{
- clk_disable_unprepare(clk->nfi_clk);
- clk_disable_unprepare(clk->pad_clk);
-}
-
static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oob_region)
{
@@ -1546,40 +1519,36 @@ static int mtk_nfc_probe(struct platform_device *pdev)
goto release_ecc;
}
- nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+ nfc->clk.nfi_clk = devm_clk_get_enabled(dev, "nfi_clk");
if (IS_ERR(nfc->clk.nfi_clk)) {
dev_err(dev, "no clk\n");
ret = PTR_ERR(nfc->clk.nfi_clk);
goto release_ecc;
}
- nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
+ nfc->clk.pad_clk = devm_clk_get_enabled(dev, "pad_clk");
if (IS_ERR(nfc->clk.pad_clk)) {
dev_err(dev, "no pad clk\n");
ret = PTR_ERR(nfc->clk.pad_clk);
goto release_ecc;
}
- ret = mtk_nfc_enable_clk(dev, &nfc->clk);
- if (ret)
- goto release_ecc;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -EINVAL;
- goto clk_disable;
+ goto release_ecc;
}
ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
if (ret) {
dev_err(dev, "failed to request nfi irq\n");
- goto clk_disable;
+ goto release_ecc;
}
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "failed to set dma mask\n");
- goto clk_disable;
+ goto release_ecc;
}
platform_set_drvdata(pdev, nfc);
@@ -1587,14 +1556,11 @@ static int mtk_nfc_probe(struct platform_device *pdev)
ret = mtk_nfc_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init nand chips\n");
- goto clk_disable;
+ goto release_ecc;
}
return 0;
-clk_disable:
- mtk_nfc_disable_clk(&nfc->clk);
-
release_ecc:
mtk_ecc_release(nfc->ecc);
@@ -1619,7 +1585,6 @@ static void mtk_nfc_remove(struct platform_device *pdev)
}
mtk_ecc_release(nfc->ecc);
- mtk_nfc_disable_clk(&nfc->clk);
}
#ifdef CONFIG_PM_SLEEP
@@ -1627,7 +1592,8 @@ static int mtk_nfc_suspend(struct device *dev)
{
struct mtk_nfc *nfc = dev_get_drvdata(dev);
- mtk_nfc_disable_clk(&nfc->clk);
+ clk_disable_unprepare(nfc->clk.nfi_clk);
+ clk_disable_unprepare(nfc->clk.pad_clk);
return 0;
}
@@ -1642,9 +1608,18 @@ static int mtk_nfc_resume(struct device *dev)
udelay(200);
- ret = mtk_nfc_enable_clk(dev, &nfc->clk);
- if (ret)
+ ret = clk_prepare_enable(nfc->clk.nfi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable nfi clk\n");
return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->clk.pad_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pad clk\n");
+ clk_disable_unprepare(nfc->clk.nfi_clk);
+ return ret;
+ }
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
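
The mtk conversion keeps clock gating only in the PM hooks: probe relies on devm_clk_get_enabled(), while suspend/resume still toggle the clocks by hand. A condensed sketch of that split, with hypothetical demo_* names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_nfc {
	struct clk *nfi_clk;
	struct clk *pad_clk;
};

static int demo_nfc_probe(struct platform_device *pdev)
{
	struct demo_nfc *nfc;

	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	/* Clocks come up enabled and are torn down automatically on unbind. */
	nfc->nfi_clk = devm_clk_get_enabled(&pdev->dev, "nfi_clk");
	if (IS_ERR(nfc->nfi_clk))
		return PTR_ERR(nfc->nfi_clk);

	nfc->pad_clk = devm_clk_get_enabled(&pdev->dev, "pad_clk");
	if (IS_ERR(nfc->pad_clk))
		return PTR_ERR(nfc->pad_clk);

	platform_set_drvdata(pdev, nfc);
	return 0;
}

static int demo_nfc_suspend(struct device *dev)
{
	struct demo_nfc *nfc = dev_get_drvdata(dev);

	/* Gate the clocks across suspend; devres does nothing here. */
	clk_disable_unprepare(nfc->nfi_clk);
	clk_disable_unprepare(nfc->pad_clk);
	return 0;
}

static int demo_nfc_resume(struct device *dev)
{
	struct demo_nfc *nfc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(nfc->nfi_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(nfc->pad_clk);
	if (ret)
		clk_disable_unprepare(nfc->nfi_clk);

	return ret;
}
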
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 3d4b2e8294ea..003008355b3c 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -20,7 +20,6 @@
#include <linux/irq.h>
#include <linux/completion.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#define DRIVER_NAME "mxc_nand"
@@ -1696,7 +1695,6 @@ static int mxcnd_probe(struct platform_device *pdev)
struct nand_chip *this;
struct mtd_info *mtd;
struct mxc_nand_host *host;
- struct resource *res;
int err = 0;
/* Allocate memory for MTD device structure and private data */
@@ -1740,17 +1738,15 @@ static int mxcnd_probe(struct platform_device *pdev)
this->options |= NAND_KEEP_TIMINGS;
if (host->devtype_data->needs_ip) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
+ host->regs_ip = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->regs_ip))
return PTR_ERR(host->regs_ip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->base = devm_platform_ioremap_resource(pdev, 1);
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
}
- host->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->base))
return PTR_ERR(host->base);
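
When the struct resource pointer is not needed afterwards, devm_platform_ioremap_resource() maps a region purely by index, which is what lets the mxc hunk drop its local res variable. A small helper sketch of the two-region case (hypothetical name and signature):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/types.h>

/*
 * Map region 0 as the IP registers plus region 1 as the buffer area when the
 * variant needs both; otherwise region 0 alone is the buffer area.
 */
static int demo_map_regions(struct platform_device *pdev, bool needs_ip,
			    void __iomem **regs_ip, void __iomem **base)
{
	if (needs_ip) {
		*regs_ip = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(*regs_ip))
			return PTR_ERR(*regs_ip);

		*base = devm_platform_ioremap_resource(pdev, 1);
	} else {
		*base = devm_platform_ioremap_resource(pdev, 0);
	}

	return PTR_ERR_OR_ZERO(*base);
}
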
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index a6af521832aa..d4b55155aeae 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1885,6 +1885,7 @@ int nand_exit_status_op(struct nand_chip *chip)
return 0;
}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
/**
* nand_erase_op - Do an erase operation
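
Exporting nand_exit_status_op() lets controller drivers pair it with nand_status_op(), which is exactly what the meson hunks above rely on. A generic polling ready-wait built from those two helpers, as a sketch only (the meson driver itself waits on an interrupt instead of polling):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mtd/rawnand.h>

/*
 * Poll READ STATUS until the chip reports ready, then issue READ0 via
 * nand_exit_status_op() so a following data read is not mistaken for
 * status bytes.
 */
static int demo_wait_ready(struct nand_chip *chip, unsigned int timeout_ms)
{
	unsigned long limit = jiffies + msecs_to_jiffies(timeout_ms);
	u8 status;
	int ret;

	do {
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;

		if (status & NAND_STATUS_READY)
			return nand_exit_status_op(chip);

		usleep_range(100, 200);
	} while (time_before(jiffies, limit));

	return -ETIMEDOUT;
}
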
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 57f3db32122d..3bb32a7c6d67 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -22,8 +22,9 @@
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#define NDFC_MAX_CS 4
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index db22b3af16d8..c45bef6158e7 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -22,7 +22,7 @@
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_data/elm.h>
@@ -2219,8 +2219,7 @@ static int omap_nand_probe(struct platform_device *pdev)
}
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vaddr = devm_ioremap_resource(&pdev->dev, res);
+ vaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 7e0313889b50..2951d81614fd 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -169,16 +169,10 @@ static int __init orion_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
/* Not all platforms can gate the clock, so it is optional. */
- info->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ info->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(info->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
- "failed to get clock!\n");
-
- ret = clk_prepare_enable(info->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to prepare clock!\n");
- return ret;
- }
+ "failed to get and enable clock!\n");
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
@@ -189,19 +183,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
ret = nand_scan(nc, 1);
if (ret)
- goto no_dev;
+ return ret;
mtd->name = "orion_nand";
ret = mtd_device_register(mtd, board->parts, board->nr_parts);
- if (ret) {
+ if (ret)
nand_cleanup(nc);
- goto no_dev;
- }
-
- return 0;
-no_dev:
- clk_disable_unprepare(info->clk);
return ret;
}
@@ -215,8 +203,6 @@ static void orion_nand_remove(struct platform_device *pdev)
WARN_ON(ret);
nand_cleanup(chip);
-
- clk_disable_unprepare(info->clk);
}
#ifdef CONFIG_OF
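
The orion hunk combines the optional and enabled clock helpers: devm_clk_get_optional_enabled() returns NULL when no clock is described, so only a real clock error (including -EPROBE_DEFER) reaches dev_err_probe(). A sketch of that lookup with a hypothetical helper name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_get_optional_clock(struct platform_device *pdev,
				   struct clk **clk)
{
	/*
	 * NULL (no clock described) is a valid result; -EPROBE_DEFER and
	 * genuine errors are reported through dev_err_probe().
	 */
	*clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
	if (IS_ERR(*clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(*clk),
				     "failed to get and enable clock\n");

	return 0;
}
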
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
deleted file mode 100644
index e3c9807df1cd..000000000000
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ /dev/null
@@ -1,209 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Oxford Semiconductor OXNAS NAND driver
-
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- * Heavily based on plat_nand.c :
- * Author: Vitaly Wool <vitalywool@gmail.com>
- * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/reset.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of.h>
-
-/* Nand commands */
-#define OXNAS_NAND_CMD_ALE BIT(18)
-#define OXNAS_NAND_CMD_CLE BIT(19)
-
-#define OXNAS_NAND_MAX_CHIPS 1
-
-struct oxnas_nand_ctrl {
- struct nand_controller base;
- void __iomem *io_base;
- struct clk *clk;
- struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
- unsigned int nchips;
-};
-
-static uint8_t oxnas_nand_read_byte(struct nand_chip *chip)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- return readb(oxnas->io_base);
-}
-
-static void oxnas_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- ioread8_rep(oxnas->io_base, buf, len);
-}
-
-static void oxnas_nand_write_buf(struct nand_chip *chip, const u8 *buf,
- int len)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- iowrite8_rep(oxnas->io_base, buf, len);
-}
-
-/* Single CS command control */
-static void oxnas_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- if (ctrl & NAND_CLE)
- writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_CLE);
- else if (ctrl & NAND_ALE)
- writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_ALE);
-}
-
-/*
- * Probe for the NAND device.
- */
-static int oxnas_nand_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct device_node *nand_np;
- struct oxnas_nand_ctrl *oxnas;
- struct nand_chip *chip;
- struct mtd_info *mtd;
- int count = 0;
- int err = 0;
- int i;
-
- /* Allocate memory for the device structure (and zero it) */
- oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas),
- GFP_KERNEL);
- if (!oxnas)
- return -ENOMEM;
-
- nand_controller_init(&oxnas->base);
-
- oxnas->io_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(oxnas->io_base))
- return PTR_ERR(oxnas->io_base);
-
- oxnas->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(oxnas->clk))
- oxnas->clk = NULL;
-
- /* Only a single chip node is supported */
- count = of_get_child_count(np);
- if (count > 1)
- return -EINVAL;
-
- err = clk_prepare_enable(oxnas->clk);
- if (err)
- return err;
-
- device_reset_optional(&pdev->dev);
-
- for_each_child_of_node(np, nand_np) {
- chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!chip) {
- err = -ENOMEM;
- goto err_release_child;
- }
-
- chip->controller = &oxnas->base;
-
- nand_set_flash_node(chip, nand_np);
- nand_set_controller_data(chip, oxnas);
-
- mtd = nand_to_mtd(chip);
- mtd->dev.parent = &pdev->dev;
- mtd->priv = chip;
-
- chip->legacy.cmd_ctrl = oxnas_nand_cmd_ctrl;
- chip->legacy.read_buf = oxnas_nand_read_buf;
- chip->legacy.read_byte = oxnas_nand_read_byte;
- chip->legacy.write_buf = oxnas_nand_write_buf;
- chip->legacy.chip_delay = 30;
-
- /* Scan to find existence of the device */
- err = nand_scan(chip, 1);
- if (err)
- goto err_release_child;
-
- err = mtd_device_register(mtd, NULL, 0);
- if (err)
- goto err_cleanup_nand;
-
- oxnas->chips[oxnas->nchips++] = chip;
- }
-
- /* Exit if no chips found */
- if (!oxnas->nchips) {
- err = -ENODEV;
- goto err_clk_unprepare;
- }
-
- platform_set_drvdata(pdev, oxnas);
-
- return 0;
-
-err_cleanup_nand:
- nand_cleanup(chip);
-err_release_child:
- of_node_put(nand_np);
-
- for (i = 0; i < oxnas->nchips; i++) {
- chip = oxnas->chips[i];
- WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
- nand_cleanup(chip);
- }
-
-err_clk_unprepare:
- clk_disable_unprepare(oxnas->clk);
- return err;
-}
-
-static void oxnas_nand_remove(struct platform_device *pdev)
-{
- struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
- struct nand_chip *chip;
- int i;
-
- for (i = 0; i < oxnas->nchips; i++) {
- chip = oxnas->chips[i];
- WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
- nand_cleanup(chip);
- }
-
- clk_disable_unprepare(oxnas->clk);
-}
-
-static const struct of_device_id oxnas_nand_match[] = {
- { .compatible = "oxsemi,ox820-nand" },
- {},
-};
-MODULE_DEVICE_TABLE(of, oxnas_nand_match);
-
-static struct platform_driver oxnas_nand_driver = {
- .probe = oxnas_nand_probe,
- .remove_new = oxnas_nand_remove,
- .driver = {
- .name = "oxnas_nand",
- .of_match_table = oxnas_nand_match,
- },
-};
-
-module_platform_driver(oxnas_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
-MODULE_DESCRIPTION("Oxnas NAND driver");
-MODULE_ALIAS("platform:oxnas_nand");
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 28b7bd7e22eb..8da5fee321b5 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -23,9 +23,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 72d6168d8a1b..64499c1b3603 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2,19 +2,19 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-#include <linux/clk.h>
-#include <linux/slab.h>
#include <linux/bitops.h>
-#include <linux/dma/qcom_adm.h>
-#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
#include <linux/module.h>
-#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/delay.h>
-#include <linux/dma/qcom_bam_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -123,8 +123,8 @@
/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK 1
#define AUTO_DETECT_RES 0
-#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
-#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
+#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
@@ -157,6 +157,7 @@
#define OP_PAGE_PROGRAM_WITH_ECC 0x7
#define OP_PROGRAM_PAGE_SPARE 0x9
#define OP_BLOCK_ERASE 0xa
+#define OP_CHECK_STATUS 0xc
#define OP_FETCH_ID 0xb
#define OP_RESET_DEVICE 0xd
@@ -211,7 +212,7 @@ nandc_set_reg(chip, reg, \
/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
((chip)->reg_read_dma + \
- ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
@@ -235,6 +236,8 @@ nandc_set_reg(chip, reg, \
*/
#define NAND_ERASED_CW_SET BIT(4)
+#define MAX_ADDRESS_CYCLE 5
+
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
@@ -382,6 +385,9 @@ struct nandc_regs {
* @reg_read_pos: marker for data read in reg_read_buf
*
* @cmd1/vld: some fixed controller register values
+ *
+ * @exec_opwrite: flag to select the correct number of codewords
+ * while reading status
*/
struct qcom_nand_controller {
struct device *dev;
@@ -432,6 +438,7 @@ struct qcom_nand_controller {
int reg_read_pos;
u32 cmd1, vld;
+ bool exec_opwrite;
};
/*
@@ -448,6 +455,29 @@ struct qcom_nand_boot_partition {
};
/*
+ * Qcom op for each exec_op transfer
+ *
+ * @data_instr: data instruction pointer
+ * @data_instr_idx: data instruction index
+ * @rdy_timeout_ms: wait ready timeout in ms
+ * @rdy_delay_ns: Additional delay in ns
+ * @addr1_reg: Address1 register value
+ * @addr2_reg: Address2 register value
+ * @cmd_reg: CMD register value
+ * @flag: flag for misc instruction
+ */
+struct qcom_op {
+ const struct nand_op_instr *data_instr;
+ unsigned int data_instr_idx;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ u32 addr1_reg;
+ u32 addr2_reg;
+ u32 cmd_reg;
+ u8 flag;
+};
+
+/*
* NAND chip structure
*
* @boot_partitions: array of boot partitions where offset and size of the
@@ -1273,182 +1303,33 @@ static void config_nand_cw_write(struct nand_chip *chip)
write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
-/*
- * the following functions are used within chip->legacy.cmdfunc() to
- * perform different NAND_CMD_* commands
- */
-
-/* sets up descriptors for NAND_CMD_PARAM */
-static int nandc_param(struct qcom_nand_host *host)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- /*
- * NAND_CMD_PARAM is called before we know much about the FLASH chip
- * in use. we configure the controller to perform a raw read of 512
- * bytes to read onfi params
- */
- if (nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ |
- PAGE_ACC | LAST_PAGE);
- else
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ |
- PAGE_ACC | LAST_PAGE);
-
- nandc_set_reg(chip, NAND_ADDR0, 0);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
- | 512 << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES);
- nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | 0 << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
-
- /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD_VLD,
- (nandc->vld & ~READ_START_VLD));
- nandc_set_reg(chip, NAND_DEV_CMD1,
- (nandc->cmd1 & ~(0xFF << READ_ADDR))
- | NAND_CMD_PARAM << READ_ADDR);
- }
-
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
- nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
- }
-
- nandc_set_read_loc(chip, 0, 0, 0, 512, 1);
-
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
- }
-
- nandc->buf_count = 512;
- memset(nandc->data_buffer, 0xff, nandc->buf_count);
-
- config_nand_single_cw_page_read(chip, false, 0);
-
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count, 0);
-
- /* restore CMD1 and VLD regs */
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
- }
-
- return 0;
-}
-
-/* sets up descriptors for NAND_CMD_ERASE1 */
-static int erase_block(struct qcom_nand_host *host, int page_addr)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- nandc_set_reg(chip, NAND_FLASH_CMD,
- OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
- nandc_set_reg(chip, NAND_ADDR0, page_addr);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_DEV0_CFG0,
- host->cfg0_raw & ~(7 << CW_PER_PAGE));
- nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
-
- write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- return 0;
-}
-
-/* sets up descriptors for NAND_CMD_READID */
-static int read_id(struct qcom_nand_host *host, int column)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- if (column == -1)
- return 0;
-
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_FETCH_ID);
- nandc_set_reg(chip, NAND_ADDR0, column);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
- nandc->props->is_bam ? 0 : DM_EN);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-
- write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-
- return 0;
-}
-
-/* sets up descriptors for NAND_CMD_RESET */
-static int reset(struct qcom_nand_host *host)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_RESET_DEVICE);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- return 0;
-}
-
/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
- struct desc_info *desc;
+ struct desc_info *desc, *n;
dma_cookie_t cookie = 0;
struct bam_transaction *bam_txn = nandc->bam_txn;
- int r;
+ int ret = 0;
if (nandc->props->is_bam) {
if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- if (r)
- return r;
+ ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (ret)
+ goto err_unmap_free_desc;
}
if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->tx_chan,
+ ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
DMA_PREP_INTERRUPT);
- if (r)
- return r;
+ if (ret)
+ goto err_unmap_free_desc;
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
DMA_PREP_CMD);
- if (r)
- return r;
+ if (ret)
+ goto err_unmap_free_desc;
}
}
@@ -1470,19 +1351,17 @@ static int submit_descs(struct qcom_nand_controller *nandc)
if (!wait_for_completion_timeout(&bam_txn->txn_done,
QPIC_NAND_COMPLETION_TIMEOUT))
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
} else {
if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
}
- return 0;
-}
-
-static void free_descs(struct qcom_nand_controller *nandc)
-{
- struct desc_info *desc, *n;
-
+err_unmap_free_desc:
+ /*
+ * Unmap the dma sg_list and free the desc allocated by both
+ * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
+ */
list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
list_del(&desc->node);
@@ -1495,6 +1374,8 @@ static void free_descs(struct qcom_nand_controller *nandc)
kfree(desc);
}
+
+ return ret;
}
/* reset the register read buffer for next NAND operation */
@@ -1504,152 +1385,6 @@ static void clear_read_regs(struct qcom_nand_controller *nandc)
nandc_read_buffer_sync(nandc, false);
}
-static void pre_command(struct qcom_nand_host *host, int command)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- nandc->buf_count = 0;
- nandc->buf_start = 0;
- host->use_ecc = false;
- host->last_command = command;
-
- clear_read_regs(nandc);
-
- if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
- command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
- clear_bam_transaction(nandc);
-}
-
-/*
- * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
- * privately maintained status byte, this status byte can be read after
- * NAND_CMD_STATUS is called
- */
-static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- int num_cw;
- int i;
-
- num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
- nandc_read_buffer_sync(nandc, true);
-
- for (i = 0; i < num_cw; i++) {
- u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-
- if (flash_status & FS_MPU_ERR)
- host->status &= ~NAND_STATUS_WP;
-
- if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
- (flash_status &
- FS_DEVICE_STS_ERR)))
- host->status |= NAND_STATUS_FAIL;
- }
-}
-
-static void post_command(struct qcom_nand_host *host, int command)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- switch (command) {
- case NAND_CMD_READID:
- nandc_read_buffer_sync(nandc, true);
- memcpy(nandc->data_buffer, nandc->reg_read_buf,
- nandc->buf_count);
- break;
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- parse_erase_write_errors(host, command);
- break;
- default:
- break;
- }
-}
-
-/*
- * Implements chip->legacy.cmdfunc. It's only used for a limited set of
- * commands. The rest of the commands wouldn't be called by upper layers.
- * For example, NAND_CMD_READOOB would never be called because we have our own
- * versions of read_oob ops for nand_ecc_ctrl.
- */
-static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
- int column, int page_addr)
-{
- struct qcom_nand_host *host = to_qcom_nand_host(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- bool wait = false;
- int ret = 0;
-
- pre_command(host, command);
-
- switch (command) {
- case NAND_CMD_RESET:
- ret = reset(host);
- wait = true;
- break;
-
- case NAND_CMD_READID:
- nandc->buf_count = 4;
- ret = read_id(host, column);
- wait = true;
- break;
-
- case NAND_CMD_PARAM:
- ret = nandc_param(host);
- wait = true;
- break;
-
- case NAND_CMD_ERASE1:
- ret = erase_block(host, page_addr);
- wait = true;
- break;
-
- case NAND_CMD_READ0:
- /* we read the entire page for now */
- WARN_ON(column != 0);
-
- host->use_ecc = true;
- set_address(host, 0, page_addr);
- update_rw_regs(host, ecc->steps, true, 0);
- break;
-
- case NAND_CMD_SEQIN:
- WARN_ON(column != 0);
- set_address(host, 0, page_addr);
- break;
-
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_STATUS:
- case NAND_CMD_NONE:
- default:
- break;
- }
-
- if (ret) {
- dev_err(nandc->dev, "failure executing command %d\n",
- command);
- free_descs(nandc);
- return;
- }
-
- if (wait) {
- ret = submit_descs(nandc);
- if (ret)
- dev_err(nandc->dev,
- "failure submitting descs for command %d\n",
- command);
- }
-
- free_descs(nandc);
-
- post_command(host, command);
-}
-
/*
* when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
* an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
@@ -1736,6 +1471,9 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
int raw_cw = cw;
nand_read_page_op(chip, page, 0, NULL, 0);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ clear_read_regs(nandc);
host->use_ecc = false;
if (nandc->props->qpic_v2)
@@ -1786,7 +1524,6 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
ret = submit_descs(nandc);
- free_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
return ret;
@@ -1819,7 +1556,7 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *cw_data_buf, *cw_oob_buf;
- int cw, data_size, oob_size, ret = 0;
+ int cw, data_size, oob_size, ret;
if (!data_buf)
data_buf = nand_get_data_buf(chip);
@@ -2040,8 +1777,6 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
}
ret = submit_descs(nandc);
- free_descs(nandc);
-
if (ret) {
dev_err(nandc->dev, "failure to read page/oob\n");
return ret;
@@ -2080,8 +1815,6 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
if (ret)
dev_err(nandc->dev, "failed to copy last codeword\n");
- free_descs(nandc);
-
return ret;
}
@@ -2149,17 +1882,25 @@ static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
}
/* implements ecc->read_page() */
-static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
+static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *data_buf, *oob_buf = NULL;
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
nand_read_page_op(chip, page, 0, NULL, 0);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = true;
+ clear_read_regs(nandc);
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true, 0);
+
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -2169,7 +1910,7 @@ static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
}
/* implements ecc->read_page_raw() */
-static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+static int qcom_nandc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -2215,7 +1956,7 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
}
/* implements ecc->write_page() */
-static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
+static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
@@ -2229,6 +1970,9 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ set_address(host, 0, page);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2251,7 +1995,6 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
oob_size = ecc->bytes;
}
-
write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
@@ -2276,20 +2019,17 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
}
ret = submit_descs(nandc);
- if (ret)
+ if (ret) {
dev_err(nandc->dev, "failure to write page\n");
+ return ret;
+ }
- free_descs(nandc);
-
- if (!ret)
- ret = nand_prog_page_end_op(chip);
-
- return ret;
+ return nand_prog_page_end_op(chip);
}
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
+ const u8 *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -2352,15 +2092,12 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
}
ret = submit_descs(nandc);
- if (ret)
+ if (ret) {
dev_err(nandc->dev, "failure to write raw page\n");
+ return ret;
+ }
- free_descs(nandc);
-
- if (!ret)
- ret = nand_prog_page_end_op(chip);
-
- return ret;
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2404,12 +2141,9 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
config_nand_cw_write(chip);
ret = submit_descs(nandc);
-
- free_descs(nandc);
-
if (ret) {
dev_err(nandc->dev, "failure to write oob\n");
- return -EIO;
+ return ret;
}
return nand_prog_page_end_op(chip);
@@ -2483,73 +2217,12 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
config_nand_cw_write(chip);
ret = submit_descs(nandc);
-
- free_descs(nandc);
-
if (ret) {
dev_err(nandc->dev, "failure to update BBM\n");
- return -EIO;
- }
-
- return nand_prog_page_end_op(chip);
-}
-
-/*
- * the three functions below implement chip->legacy.read_byte(),
- * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
- * aren't used for reading/writing page data, they are used for smaller data
- * like reading id, status etc
- */
-static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
-{
- struct qcom_nand_host *host = to_qcom_nand_host(chip);
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- u8 *buf = nandc->data_buffer;
- u8 ret = 0x0;
-
- if (host->last_command == NAND_CMD_STATUS) {
- ret = host->status;
-
- host->status = NAND_STATUS_READY | NAND_STATUS_WP;
-
return ret;
}
- if (nandc->buf_start < nandc->buf_count)
- ret = buf[nandc->buf_start++];
-
- return ret;
-}
-
-static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
-{
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
-
- memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
- nandc->buf_start += real_len;
-}
-
-static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
- int len)
-{
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
-
- memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
-
- nandc->buf_start += real_len;
-}
-
-/* we support only one external chip for now */
-static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
-{
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- if (chipnr <= 0)
- return;
-
- dev_warn(nandc->dev, "invalid chip select\n");
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2660,7 +2333,7 @@ static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
}
static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
+ struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
@@ -2685,6 +2358,7 @@ qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
return strength == 4 ? 12 : 16;
}
+
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
NANDC_STEP_SIZE, 4, 8);
@@ -2867,8 +2541,479 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
return 0;
}
+static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
+ struct qcom_op *q_op)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ int cmd;
+
+ switch (opcode) {
+ case NAND_CMD_RESET:
+ cmd = OP_RESET_DEVICE;
+ break;
+ case NAND_CMD_READID:
+ cmd = OP_FETCH_ID;
+ break;
+ case NAND_CMD_PARAM:
+ if (nandc->props->qpic_v2)
+ cmd = OP_PAGE_READ_ONFI_READ;
+ else
+ cmd = OP_PAGE_READ;
+ break;
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ cmd = OP_BLOCK_ERASE;
+ break;
+ case NAND_CMD_STATUS:
+ cmd = OP_CHECK_STATUS;
+ break;
+ case NAND_CMD_PAGEPROG:
+ cmd = OP_PROGRAM_PAGE;
+ q_op->flag = OP_PROGRAM_PAGE;
+ nandc->exec_opwrite = true;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READSTART:
+ if (host->use_ecc)
+ cmd = OP_PAGE_READ_WITH_ECC;
+ else
+ cmd = OP_PAGE_READ;
+ break;
+ default:
+ dev_err(nandc->dev, "Opcode not supported: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return cmd;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static int qcom_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct qcom_op *q_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id;
+ int i, ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = qcom_op_cmd_mapping(chip, instr->ctx.cmd.opcode, q_op);
+ if (ret < 0)
+ return ret;
+
+ q_op->cmd_reg = ret;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ q_op->addr1_reg |= addrs[i] << (i * 8);
+
+ if (naddrs > 4)
+ q_op->addr2_reg |= addrs[4];
+
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ q_op->data_instr = instr;
+ q_op->data_instr_idx = op_id;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_delay_ns(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+ u32 flash;
+
+ nandc_read_buffer_sync(nandc, true);
+
+ do {
+ flash = le32_to_cpu(nandc->reg_read_buf[0]);
+ if (flash & FS_READY_BSY_N)
+ return 0;
+ cpu_relax();
+ } while (time_after(start, jiffies));
+
+ dev_err(nandc->dev, "Timeout waiting for device to be ready:0x%08x\n", flash);
+
+ return -ETIMEDOUT;
+}
+
+static int qcom_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret, num_cw, i;
+ u32 flash_status;
+
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ num_cw = nandc->exec_opwrite ? ecc->steps : 1;
+ nandc->exec_opwrite = false;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting status descriptor\n");
+ goto err_out;
+ }
+
+ nandc_read_buffer_sync(nandc, true);
+
+ for (i = 0; i < num_cw; i++) {
+ flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash_status & FS_MPU_ERR)
+ host->status &= ~NAND_STATUS_WP;
+
+ if (flash_status & FS_OP_ERR ||
+ (i == (num_cw - 1) && (flash_status & FS_DEVICE_STS_ERR)))
+ host->status |= NAND_STATUS_FAIL;
+ }
+
+ flash_status = host->status;
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+ memcpy(instr->ctx.data.buf.in, &flash_status, len);
+
+err_out:
+ return ret;
+}
+
+static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+ nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+ nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+ nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
+ nandc->props->is_bam ? 0 : DM_EN);
+
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting read id descriptor\n");
+ goto err_out;
+ }
+
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+ nandc_read_buffer_sync(nandc, true);
+ memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+
+err_out:
+ return ret;
+}
+
+static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_op q_op = {};
+ int ret;
+ int instrs = 1;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ if (q_op.flag == OP_PROGRAM_PAGE) {
+ goto wait_rdy;
+ } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
+ q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+ nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+ nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+ nandc_set_reg(chip, NAND_DEV0_CFG0,
+ host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
+ instrs = 3;
+ } else {
+ return 0;
+ }
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
+ 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
+ NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting misc descriptor\n");
+ goto err_out;
+ }
+
+wait_rdy:
+ qcom_delay_ns(q_op.rdy_delay_ns);
+ ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
+
+err_out:
+ return ret;
+}
+
+static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+
+ nandc_set_reg(chip, NAND_ADDR0, 0);
+ nandc_set_reg(chip, NAND_ADDR1, 0);
+ nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+ | 512 << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES);
+ nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | 0 << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE);
+ if (!nandc->props->qpic_v2)
+ nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+
+ /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+ if (!nandc->props->qpic_v2) {
+ nandc_set_reg(chip, NAND_DEV_CMD_VLD,
+ (nandc->vld & ~READ_START_VLD));
+ nandc_set_reg(chip, NAND_DEV_CMD1,
+ (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
+ }
+
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ if (!nandc->props->qpic_v2) {
+ nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+ nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ }
+
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+ nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+
+ if (!nandc->props->qpic_v2) {
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ nandc->buf_count = len;
+ memset(nandc->data_buffer, 0xff, nandc->buf_count);
+
+ config_nand_single_cw_page_read(chip, false, 0);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);
+
+ /* restore CMD1 and VLD regs */
+ if (!nandc->props->qpic_v2) {
+ write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting param page descriptor\n");
+ goto err_out;
+ }
+
+ ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
+ if (ret)
+ goto err_out;
+
+ memcpy(instr->ctx.data.buf.in, nandc->data_buffer, len);
+
+err_out:
+ return ret;
+}
+
+static const struct nand_op_parser qcom_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ qcom_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_param_page_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 512)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_misc_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int qcom_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (instr->ctx.cmd.opcode != NAND_CMD_RESET &&
+ instr->ctx.cmd.opcode != NAND_CMD_READID &&
+ instr->ctx.cmd.opcode != NAND_CMD_PARAM &&
+ instr->ctx.cmd.opcode != NAND_CMD_ERASE1 &&
+ instr->ctx.cmd.opcode != NAND_CMD_ERASE2 &&
+ instr->ctx.cmd.opcode != NAND_CMD_STATUS &&
+ instr->ctx.cmd.opcode != NAND_CMD_PAGEPROG &&
+ instr->ctx.cmd.opcode != NAND_CMD_READ0 &&
+ instr->ctx.cmd.opcode != NAND_CMD_READSTART)
+ return -EOPNOTSUPP;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ if (check_only)
+ return qcom_check_op(chip, op);
+
+ return nand_op_parser_exec_op(chip, &qcom_op_parser, op, check_only);
+}
+
static const struct nand_controller_ops qcom_nandc_ops = {
.attach_chip = qcom_nand_attach_chip,
+ .exec_op = qcom_nand_exec_op,
};
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
@@ -2912,19 +3057,17 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
*/
nandc->buf_size = 532;
- nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
- GFP_KERNEL);
+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
if (!nandc->data_buffer)
return -ENOMEM;
- nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
- GFP_KERNEL);
+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
if (!nandc->regs)
return -ENOMEM;
- nandc->reg_read_buf = devm_kcalloc(nandc->dev,
- MAX_REG_RD, sizeof(*nandc->reg_read_buf),
- GFP_KERNEL);
+ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+ sizeof(*nandc->reg_read_buf),
+ GFP_KERNEL);
if (!nandc->reg_read_buf)
return -ENOMEM;
@@ -2969,7 +3112,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
/*
* Initially allocate BAM transaction to read ONFI param page.
* After detecting all the devices, this BAM transaction will
- * be freed and the next BAM tranasction will be allocated with
+ * be freed and the next BAM transaction will be allocated with
* maximum codeword size
*/
nandc->max_cwperpage = 1;
@@ -3135,14 +3278,6 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
- chip->legacy.cmdfunc = qcom_nandc_command;
- chip->legacy.select_chip = qcom_nandc_select_chip;
- chip->legacy.read_byte = qcom_nandc_read_byte;
- chip->legacy.read_buf = qcom_nandc_read_buf;
- chip->legacy.write_buf = qcom_nandc_write_buf;
- chip->legacy.set_features = nand_get_set_features_notsupp;
- chip->legacy.get_features = nand_get_set_features_notsupp;
-
/*
* the bad block marker is readable only when we read the last codeword
* of a page with ECC disabled. currently, the nand_base and nand_bbt
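
The qcom rework replaces the legacy cmdfunc/read_byte/read_buf hooks with an ->exec_op() implementation driven by a nand_op_parser, as the hunks above show. The overall wiring of such a conversion, with the controller-specific register programming elided and hypothetical demo_* names, looks roughly like:

#include <linux/errno.h>
#include <linux/mtd/rawnand.h>

static int demo_reset_type_exec(struct nand_chip *chip,
				const struct nand_subop *subop)
{
	/*
	 * Walk subop->instrs[], program the controller's command/address
	 * registers, kick the operation and wait for ready; all of that is
	 * controller specific and omitted here.
	 */
	return 0;
}

/* Patterns describe which instruction sequences each handler can execute. */
static const struct nand_op_parser demo_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		demo_reset_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);

static int demo_exec_op(struct nand_chip *chip,
			const struct nand_operation *op, bool check_only)
{
	/*
	 * In check_only mode the core only asks whether the operation could
	 * be executed; the parser answers that without touching hardware.
	 */
	return nand_op_parser_exec_op(chip, &demo_op_parser, op, check_only);
}

static const struct nand_controller_ops demo_controller_ops = {
	.exec_op = demo_exec_op,
};
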
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index 5a04680342c3..5bc90ffa721f 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -15,7 +15,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index ac80aaf5b4e3..3d3d5c9814ff 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -26,7 +26,6 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 63bf20c41719..3e5df75cbc98 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -17,7 +17,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -1124,8 +1123,7 @@ static int flctl_probe(struct platform_device *pdev)
if (!flctl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+ flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(flctl->reg))
return PTR_ERR(flctl->reg);
flctl->fifo = res->start + 0x24; /* FLDTFIFO */
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index a8b720ffe9e8..76d50eb9f1db 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -8,8 +8,9 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#define FPGA_NAND_CMD_MASK (0x7 << 28)
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 10c11cecac08..88811139aaf5 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1922,8 +1922,8 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
- res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
- nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res);
+ nfc->data_base[chip_cs] = devm_platform_get_and_ioremap_resource(pdev,
+ mem_region, &res);
if (IS_ERR(nfc->data_base[chip_cs]))
return PTR_ERR(nfc->data_base[chip_cs]);
@@ -1951,21 +1951,17 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
init_completion(&nfc->complete);
- nfc->clk = devm_clk_get(nfc->cdev, NULL);
- if (IS_ERR(nfc->clk))
+ nfc->clk = devm_clk_get_enabled(nfc->cdev, NULL);
+ if (IS_ERR(nfc->clk)) {
+ dev_err(dev, "can not get and enable the clock\n");
return PTR_ERR(nfc->clk);
-
- ret = clk_prepare_enable(nfc->clk);
- if (ret) {
- dev_err(dev, "can not enable the clock\n");
- return ret;
}
rstc = devm_reset_control_get(dev, NULL);
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
if (ret == -EPROBE_DEFER)
- goto err_clk_disable;
+ return ret;
} else {
reset_control_assert(rstc);
reset_control_deassert(rstc);
@@ -2018,9 +2014,6 @@ err_release_dma:
sg_free_table(&nfc->dma_data_sg);
sg_free_table(&nfc->dma_ecc_sg);
-err_clk_disable:
- clk_disable_unprepare(nfc->clk);
-
return ret;
}
@@ -2045,8 +2038,6 @@ static void stm32_fmc2_nfc_remove(struct platform_device *pdev)
sg_free_table(&nfc->dma_data_sg);
sg_free_table(&nfc->dma_ecc_sg);
- clk_disable_unprepare(nfc->clk);
-
stm32_fmc2_nfc_wp_enable(nand);
}
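Several conversions in this series (stm32_fmc2 above, sunxi and vf610 further down, nxp-spifi later) follow the same pattern: devm_clk_get_enabled() folds devm_clk_get() plus clk_prepare_enable() into one call and registers the disable/unprepare as a devres action, which is exactly why the error-path labels and the clk_disable_unprepare() calls in .remove() can go away. A hedged probe-excerpt sketch of the pattern (foo_* names are placeholders; dev_err_probe() is my choice here, the patches above keep plain dev_err()):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

struct foo_priv {
	struct clk *clk;
};

static int foo_probe_clk(struct platform_device *pdev, struct foo_priv *priv)
{
	/*
	 * devres disables and unprepares the clock automatically when
	 * probe fails or the device is unbound, so plain returns are
	 * enough and no goto-based unwind is needed.
	 */
	priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
				     "can not get and enable the clock\n");

	return 0;
}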
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 9884304634f6..9abf38049d35 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -19,7 +19,6 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
@@ -2087,8 +2086,7 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nfc->regs = devm_ioremap_resource(dev, r);
+ nfc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
@@ -2096,37 +2094,26 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ nfc->ahb_clk = devm_clk_get_enabled(dev, "ahb");
if (IS_ERR(nfc->ahb_clk)) {
dev_err(dev, "failed to retrieve ahb clk\n");
return PTR_ERR(nfc->ahb_clk);
}
- ret = clk_prepare_enable(nfc->ahb_clk);
- if (ret)
- return ret;
-
- nfc->mod_clk = devm_clk_get(dev, "mod");
+ nfc->mod_clk = devm_clk_get_enabled(dev, "mod");
if (IS_ERR(nfc->mod_clk)) {
dev_err(dev, "failed to retrieve mod clk\n");
- ret = PTR_ERR(nfc->mod_clk);
- goto out_ahb_clk_unprepare;
+ return PTR_ERR(nfc->mod_clk);
}
- ret = clk_prepare_enable(nfc->mod_clk);
- if (ret)
- goto out_ahb_clk_unprepare;
-
nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
- if (IS_ERR(nfc->reset)) {
- ret = PTR_ERR(nfc->reset);
- goto out_mod_clk_unprepare;
- }
+ if (IS_ERR(nfc->reset))
+ return PTR_ERR(nfc->reset);
ret = reset_control_deassert(nfc->reset);
if (ret) {
dev_err(dev, "reset err %d\n", ret);
- goto out_mod_clk_unprepare;
+ return ret;
}
nfc->caps = of_device_get_match_data(&pdev->dev);
@@ -2165,10 +2152,6 @@ out_release_dmac:
dma_release_channel(nfc->dmac);
out_ahb_reset_reassert:
reset_control_assert(nfc->reset);
-out_mod_clk_unprepare:
- clk_disable_unprepare(nfc->mod_clk);
-out_ahb_clk_unprepare:
- clk_disable_unprepare(nfc->ahb_clk);
return ret;
}
@@ -2183,8 +2166,6 @@ static void sunxi_nfc_remove(struct platform_device *pdev)
if (nfc->dmac)
dma_release_channel(nfc->dmac);
- clk_disable_unprepare(nfc->mod_clk);
- clk_disable_unprepare(nfc->ahb_clk);
}
static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 86522048e271..3f783b8f76c9 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -827,30 +827,24 @@ static int vf610_nfc_probe(struct platform_device *pdev)
mtd->name = DRV_NAME;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
+ if (irq < 0)
+ return irq;
nfc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
- nfc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(nfc->clk))
+ nfc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(nfc->clk)) {
+ dev_err(nfc->dev, "Unable to get and enable clock!\n");
return PTR_ERR(nfc->clk);
-
- err = clk_prepare_enable(nfc->clk);
- if (err) {
- dev_err(nfc->dev, "Unable to enable clock!\n");
- return err;
}
of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
- if (!of_id) {
- err = -ENODEV;
- goto err_disable_clk;
- }
+ if (!of_id)
+ return -ENODEV;
- nfc->variant = (enum vf610_nfc_variant)of_id->data;
+ nfc->variant = (uintptr_t)of_id->data;
for_each_available_child_of_node(nfc->dev->of_node, child) {
if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
@@ -858,9 +852,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
if (nand_get_flash_node(chip)) {
dev_err(nfc->dev,
"Only one NAND chip supported!\n");
- err = -EINVAL;
of_node_put(child);
- goto err_disable_clk;
+ return -EINVAL;
}
nand_set_flash_node(chip, child);
@@ -869,8 +862,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
if (!nand_get_flash_node(chip)) {
dev_err(nfc->dev, "NAND chip sub-node missing!\n");
- err = -ENODEV;
- goto err_disable_clk;
+ return -ENODEV;
}
chip->options |= NAND_NO_SUBPAGE_WRITE;
@@ -880,7 +872,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, nfc);
if (err) {
dev_err(nfc->dev, "Error requesting IRQ!\n");
- goto err_disable_clk;
+ return err;
}
vf610_nfc_preinit_controller(nfc);
@@ -892,7 +884,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
/* Scan the NAND chip */
err = nand_scan(chip, 1);
if (err)
- goto err_disable_clk;
+ return err;
platform_set_drvdata(pdev, nfc);
@@ -904,8 +896,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
err_cleanup_nand:
nand_cleanup(chip);
-err_disable_clk:
- clk_disable_unprepare(nfc->clk);
return err;
}
@@ -918,7 +908,6 @@ static void vf610_nfc_remove(struct platform_device *pdev)
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
- clk_disable_unprepare(nfc->clk);
}
#ifdef CONFIG_PM_SLEEP
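One more convention visible in the vf610 hunk above: platform_get_irq() returns a negative errno on failure (current kernels never return 0 from it), so checking irq < 0 and returning irq itself is both simpler and preserves -EPROBE_DEFER instead of flattening every failure to -EINVAL. A minimal sketch of the intended shape (foo_* names are placeholders):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_request_irq(struct platform_device *pdev, void *priv)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; pass it up unchanged */

	return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
				dev_name(&pdev->dev), priv);
}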
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 6b1e2a2bba15..51d802a165ed 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -7,7 +7,8 @@
#include <linux/mtd/rawnand.h>
#include <linux/of_gpio.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <lantiq_soc.h>
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 1a3ffb982335..31c439a557b1 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -121,6 +121,15 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_INFO("F50D2G41KA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
};
static const struct spinand_manufacturer_ops esmt_spinand_manuf_ops = {
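A reading aid for the new SPI-NAND table entry above and the GigaDevice/Toshiba ones that follow: the positional NAND_MEMORG() arguments describe the memory organisation. To the best of my recollection of include/linux/mtd/nand.h the order is as annotated below, which for F50D2G41KA works out to 2048 blocks x 64 pages x 2 KiB = 256 MiB, i.e. a 2 Gbit part; treat the annotations as an aid, not authoritative documentation:

NAND_MEMORG(1,		/* bits_per_cell (SLC)		*/
	    2048,	/* pagesize, in bytes		*/
	    128,	/* oobsize, in bytes		*/
	    64,		/* pages_per_eraseblock		*/
	    2048,	/* eraseblocks_per_lun		*/
	    40,		/* max_bad_eraseblocks_per_lun	*/
	    1,		/* planes_per_lun		*/
	    1,		/* luns_per_target		*/
	    1)		/* ntargets			*/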
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index cfd7c3b26dc4..987710e09441 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -511,6 +511,26 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ5RExxH",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4RExxH",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xc9),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
};
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index a80427c13121..bbbcaa87c0bc 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -266,6 +266,39 @@ static const struct spinand_info toshiba_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58NYG0S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TH58NYG2S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAC),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 2, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (1st generation) */
+ SPINAND_INFO("TH58NYG3S0HBAI6",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA3),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
};
static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 656dd80a0be7..58968c1e7d2f 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -48,9 +48,11 @@ static const struct spi_nor_locking_ops at25fs_nor_locking_ops = {
.is_locked = at25fs_nor_is_locked,
};
-static void at25fs_nor_late_init(struct spi_nor *nor)
+static int at25fs_nor_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &at25fs_nor_locking_ops;
+
+ return 0;
}
static const struct spi_nor_fixups at25fs_nor_fixups = {
@@ -149,9 +151,11 @@ static const struct spi_nor_locking_ops atmel_nor_global_protection_ops = {
.is_locked = atmel_nor_is_global_protected,
};
-static void atmel_nor_global_protection_late_init(struct spi_nor *nor)
+static int atmel_nor_global_protection_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &atmel_nor_global_protection_ops;
+
+ return 0;
}
static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 794c7b7d5c92..5d8f47ab146f 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -17,7 +17,6 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
@@ -395,30 +394,18 @@ static int nxp_spifi_probe(struct platform_device *pdev)
if (IS_ERR(spifi->flash_base))
return PTR_ERR(spifi->flash_base);
- spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi");
+ spifi->clk_spifi = devm_clk_get_enabled(&pdev->dev, "spifi");
if (IS_ERR(spifi->clk_spifi)) {
- dev_err(&pdev->dev, "spifi clock not found\n");
+ dev_err(&pdev->dev, "spifi clock not found or unable to enable\n");
return PTR_ERR(spifi->clk_spifi);
}
- spifi->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ spifi->clk_reg = devm_clk_get_enabled(&pdev->dev, "reg");
if (IS_ERR(spifi->clk_reg)) {
- dev_err(&pdev->dev, "reg clock not found\n");
+ dev_err(&pdev->dev, "reg clock not found or unable to enable\n");
return PTR_ERR(spifi->clk_reg);
}
- ret = clk_prepare_enable(spifi->clk_reg);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable reg clock\n");
- return ret;
- }
-
- ret = clk_prepare_enable(spifi->clk_spifi);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable spifi clock\n");
- goto dis_clk_reg;
- }
-
spifi->dev = &pdev->dev;
platform_set_drvdata(pdev, spifi);
@@ -431,24 +418,17 @@ static int nxp_spifi_probe(struct platform_device *pdev)
flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
if (!flash_np) {
dev_err(&pdev->dev, "no SPI flash device to configure\n");
- ret = -ENODEV;
- goto dis_clks;
+ return -ENODEV;
}
ret = nxp_spifi_setup_flash(spifi, flash_np);
of_node_put(flash_np);
if (ret) {
dev_err(&pdev->dev, "unable to setup flash chip\n");
- goto dis_clks;
+ return ret;
}
return 0;
-
-dis_clks:
- clk_disable_unprepare(spifi->clk_spifi);
-dis_clk_reg:
- clk_disable_unprepare(spifi->clk_reg);
- return ret;
}
static int nxp_spifi_remove(struct platform_device *pdev)
@@ -456,8 +436,6 @@ static int nxp_spifi_remove(struct platform_device *pdev)
struct nxp_spifi *spifi = platform_get_drvdata(pdev);
mtd_device_unregister(&spifi->nor.mtd);
- clk_disable_unprepare(spifi->clk_spifi);
- clk_disable_unprepare(spifi->clk_reg);
return 0;
}
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 5f29fac8669a..1b0c6770c14e 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -870,21 +870,22 @@ static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
ret = spi_nor_read_cr(nor, &sr_cr[1]);
if (ret)
return ret;
- } else if (nor->params->quad_enable) {
+ } else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
+ spi_nor_get_protocol_width(nor->write_proto) == 4 &&
+ nor->params->quad_enable) {
/*
* If the Status Register 2 Read command (35h) is not
* supported, we should at least be sure we don't
* change the value of the SR2 Quad Enable bit.
*
- * We can safely assume that when the Quad Enable method is
- * set, the value of the QE bit is one, as a consequence of the
- * nor->params->quad_enable() call.
+ * When the Quad Enable method is set and the buswidth is 4, we
+ * can safely assume that the value of the QE bit is one, as a
+ * consequence of the nor->params->quad_enable() call.
*
- * We can safely assume that the Quad Enable bit is present in
- * the Status Register 2 at BIT(1). According to the JESD216
- * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
- * Write Status (01h) command is available just for the cases
- * in which the QE bit is described in SR2 at BIT(1).
+ * According to the JESD216 revB standard, BFPT DWORDS[15],
+ * bits 22:20, the 16-bit Write Status (01h) command is
+ * available just for the cases in which the QE bit is
+ * described in SR2 at BIT(1).
*/
sr_cr[1] = SR2_QUAD_EN_BIT1;
} else {
@@ -2844,6 +2845,9 @@ static void spi_nor_init_flags(struct spi_nor *nor)
if (of_property_read_bool(np, "broken-flash-reset"))
nor->flags |= SNOR_F_BROKEN_RESET;
+ if (of_property_read_bool(np, "no-wp"))
+ nor->flags |= SNOR_F_NO_WP;
+
if (flags & SPI_NOR_SWP_IS_VOLATILE)
nor->flags |= SNOR_F_SWP_IS_VOLATILE;
@@ -2897,16 +2901,23 @@ static void spi_nor_init_fixup_flags(struct spi_nor *nor)
* SFDP standard, or where SFDP tables are not defined at all.
* Will replace the spi_nor_manufacturer_init_params() method.
*/
-static void spi_nor_late_init_params(struct spi_nor *nor)
+static int spi_nor_late_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
+ int ret;
if (nor->manufacturer && nor->manufacturer->fixups &&
- nor->manufacturer->fixups->late_init)
- nor->manufacturer->fixups->late_init(nor);
+ nor->manufacturer->fixups->late_init) {
+ ret = nor->manufacturer->fixups->late_init(nor);
+ if (ret)
+ return ret;
+ }
- if (nor->info->fixups && nor->info->fixups->late_init)
- nor->info->fixups->late_init(nor);
+ if (nor->info->fixups && nor->info->fixups->late_init) {
+ ret = nor->info->fixups->late_init(nor);
+ if (ret)
+ return ret;
+ }
/* Default method kept for backward compatibility. */
if (!params->set_4byte_addr_mode)
@@ -2924,6 +2935,8 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
if (nor->info->n_banks > 1)
params->bank_size = div64_u64(params->size, nor->info->n_banks);
+
+ return 0;
}
/**
@@ -3082,22 +3095,20 @@ static int spi_nor_init_params(struct spi_nor *nor)
spi_nor_init_params_deprecated(nor);
}
- spi_nor_late_init_params(nor);
-
- return 0;
+ return spi_nor_late_init_params(nor);
}
-/** spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
+/** spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
int ret;
- if (!nor->params->octal_dtr_enable)
+ if (!nor->params->set_octal_dtr)
return 0;
if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
@@ -3107,7 +3118,7 @@ static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
return 0;
- ret = nor->params->octal_dtr_enable(nor, enable);
+ ret = nor->params->set_octal_dtr(nor, enable);
if (ret)
return ret;
@@ -3168,7 +3179,7 @@ static int spi_nor_init(struct spi_nor *nor)
{
int err;
- err = spi_nor_octal_dtr_enable(nor, true);
+ err = spi_nor_set_octal_dtr(nor, true);
if (err) {
dev_dbg(nor->dev, "octal mode not supported\n");
return err;
@@ -3270,7 +3281,7 @@ static int spi_nor_suspend(struct mtd_info *mtd)
int ret;
/* Disable octal DTR mode if we enabled it. */
- ret = spi_nor_octal_dtr_enable(nor, false);
+ ret = spi_nor_set_octal_dtr(nor, false);
if (ret)
dev_err(nor->dev, "suspend() failed\n");
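The reason late_init() gains an int return value throughout the rest of this series shows up in the Spansion patch further down: a late_init hook may now allocate per-flash private data and therefore has to be able to fail, with spi_nor_late_init_params() propagating the error. A hedged sketch of a fallible fixup under the new signature (example_* names are assumptions, not an in-tree driver; spi_nor_fixups and params->priv come from core.h below):

struct example_nor_params {
	u8 clear_opcode;
};

static int example_nor_late_init(struct spi_nor *nor)
{
	struct example_nor_params *priv;

	priv = devm_kmalloc(nor->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;	/* bubbles up through spi_nor_init_params() */

	priv->clear_opcode = 0x30;
	nor->params->priv = priv;

	return 0;
}

static const struct spi_nor_fixups example_nor_fixups = {
	.late_init = example_nor_late_init,
};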
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 4fb5ff09c63a..9217379b9cfe 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -132,6 +132,7 @@ enum spi_nor_option_flags {
SNOR_F_SWP_IS_VOLATILE = BIT(13),
SNOR_F_RWW = BIT(14),
SNOR_F_ECC = BIT(15),
+ SNOR_F_NO_WP = BIT(16),
};
struct spi_nor_read_command {
@@ -363,7 +364,7 @@ struct spi_nor_otp {
* @erase_map: the erase map parsed from the SFDP Sector Map Parameter
* Table.
* @otp: SPI NOR OTP info.
- * @octal_dtr_enable: enables SPI NOR octal DTR mode.
+ * @set_octal_dtr: enables or disables SPI NOR octal DTR mode.
* @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
* @convert_addr: converts an absolute address into something the flash
@@ -377,6 +378,7 @@ struct spi_nor_otp {
* than reading the status register to indicate they
* are ready for a new command
* @locking_ops: SPI NOR locking methods.
+ * @priv: flash's private data.
*/
struct spi_nor_flash_parameter {
u64 bank_size;
@@ -397,7 +399,7 @@ struct spi_nor_flash_parameter {
struct spi_nor_erase_map erase_map;
struct spi_nor_otp otp;
- int (*octal_dtr_enable)(struct spi_nor *nor, bool enable);
+ int (*set_octal_dtr)(struct spi_nor *nor, bool enable);
int (*quad_enable)(struct spi_nor *nor);
int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
@@ -405,6 +407,7 @@ struct spi_nor_flash_parameter {
int (*ready)(struct spi_nor *nor);
const struct spi_nor_locking_ops *locking_ops;
+ void *priv;
};
/**
@@ -431,7 +434,7 @@ struct spi_nor_fixups {
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt);
int (*post_sfdp)(struct spi_nor *nor);
- void (*late_init)(struct spi_nor *nor);
+ int (*late_init)(struct spi_nor *nor);
};
/**
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index e11536fffe0f..6e163cb5b478 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -27,6 +27,7 @@ static const char *const snor_f_names[] = {
SNOR_F_NAME(SWP_IS_VOLATILE),
SNOR_F_NAME(RWW),
SNOR_F_NAME(ECC),
+ SNOR_F_NAME(NO_WP),
};
#undef SNOR_F_NAME
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index 400e2b42f45a..accdf7aa2bfd 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -29,7 +29,7 @@ static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
-static void pm25lv_nor_late_init(struct spi_nor *nor)
+static int pm25lv_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_erase_map *map = &nor->params->erase_map;
int i;
@@ -38,6 +38,8 @@ static void pm25lv_nor_late_init(struct spi_nor *nor)
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (map->erase_type[i].size == 4096)
map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
+
+ return 0;
}
static const struct spi_nor_fixups pm25lv_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 04888258e891..eb149e517c1f 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -110,10 +110,12 @@ static void macronix_nor_default_init(struct spi_nor *nor)
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}
-static void macronix_nor_late_init(struct spi_nor *nor)
+static int macronix_nor_late_init(struct spi_nor *nor)
{
if (!nor->params->set_4byte_addr_mode)
nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b;
+
+ return 0;
}
static const struct spi_nor_fixups macronix_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 4b919756a205..6ad080c52ab5 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -120,7 +120,7 @@ static int micron_st_nor_octal_dtr_dis(struct spi_nor *nor)
return 0;
}
-static int micron_st_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int micron_st_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
return enable ? micron_st_nor_octal_dtr_en(nor) :
micron_st_nor_octal_dtr_dis(nor);
@@ -128,7 +128,7 @@ static int micron_st_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
static void mt35xu512aba_default_init(struct spi_nor *nor)
{
- nor->params->octal_dtr_enable = micron_st_nor_octal_dtr_enable;
+ nor->params->set_octal_dtr = micron_st_nor_set_octal_dtr;
}
static int mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
@@ -429,7 +429,7 @@ static void micron_st_nor_default_init(struct spi_nor *nor)
nor->params->quad_enable = NULL;
}
-static void micron_st_nor_late_init(struct spi_nor *nor)
+static int micron_st_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
@@ -438,6 +438,8 @@ static void micron_st_nor_late_init(struct spi_nor *nor)
if (!params->set_4byte_addr_mode)
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b;
+
+ return 0;
}
static const struct spi_nor_fixups micron_st_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 15f9a80c10b9..709822fced86 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -4,14 +4,19 @@
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/mtd/spi-nor.h>
#include "core.h"
/* flash_info mfr_flag. Used to clear sticky proprietary SR bits. */
#define USE_CLSR BIT(0)
+#define USE_CLPEF BIT(1)
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
+#define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
#define SPINOR_REG_CYPRESS_VREG 0x00800000
@@ -19,21 +24,16 @@
#define SPINOR_REG_CYPRESS_STR1V \
(SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_STR1)
#define SPINOR_REG_CYPRESS_CFR1 0x2
-#define SPINOR_REG_CYPRESS_CFR1V \
- (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR1)
#define SPINOR_REG_CYPRESS_CFR1_QUAD_EN BIT(1) /* Quad Enable */
#define SPINOR_REG_CYPRESS_CFR2 0x3
#define SPINOR_REG_CYPRESS_CFR2V \
(SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR2)
+#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK GENMASK(3, 0)
#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24 0xb
#define SPINOR_REG_CYPRESS_CFR2_ADRBYT BIT(7)
#define SPINOR_REG_CYPRESS_CFR3 0x4
-#define SPINOR_REG_CYPRESS_CFR3V \
- (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR3)
#define SPINOR_REG_CYPRESS_CFR3_PGSZ BIT(4) /* Page size. */
#define SPINOR_REG_CYPRESS_CFR5 0x6
-#define SPINOR_REG_CYPRESS_CFR5V \
- (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR5)
#define SPINOR_REG_CYPRESS_CFR5_BIT6 BIT(6)
#define SPINOR_REG_CYPRESS_CFR5_DDR BIT(1)
#define SPINOR_REG_CYPRESS_CFR5_OPI BIT(0)
@@ -57,22 +57,32 @@
SPI_MEM_OP_DUMMY(ndummy, 0), \
SPI_MEM_OP_DATA_IN(1, buf, 0))
-#define SPANSION_CLSR_OP \
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0), \
+#define SPANSION_OP(opcode) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
/**
+ * struct spansion_nor_params - Spansion private parameters.
+ * @clsr: Clear Status Register or Clear Program and Erase Failure Flag
+ * opcode.
+ */
+struct spansion_nor_params {
+ u8 clsr;
+};
+
+/**
* spansion_nor_clear_sr() - Clear the Status Register.
* @nor: pointer to 'struct spi_nor'.
*/
static void spansion_nor_clear_sr(struct spi_nor *nor)
{
+ const struct spansion_nor_params *priv_params = nor->params->priv;
int ret;
if (nor->spimem) {
- struct spi_mem_op op = SPANSION_CLSR_OP;
+ struct spi_mem_op op = SPANSION_OP(priv_params->clsr);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
@@ -88,11 +98,17 @@ static void spansion_nor_clear_sr(struct spi_nor *nor)
static int cypress_nor_sr_ready_and_clear_reg(struct spi_nor *nor, u64 addr)
{
+ struct spi_nor_flash_parameter *params = nor->params;
struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes, addr,
+ CYPRESS_NOR_RD_ANY_REG_OP(params->addr_mode_nbytes, addr,
0, nor->bouncebuf);
int ret;
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.dummy.nbytes = params->rdsr_dummy;
+ op.data.nbytes = 2;
+ }
+
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
@@ -141,18 +157,26 @@ static int cypress_nor_sr_ready_and_clear(struct spi_nor *nor)
return 1;
}
-static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
+static int cypress_nor_set_memlat(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
int ret;
u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0, buf);
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
/* Use 24 dummy cycles for memory array reads. */
- *buf = SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24;
+ *buf &= ~SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK;
+ *buf |= FIELD_PREP(SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK,
+ SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24);
op = (struct spi_mem_op)
- CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
- SPINOR_REG_CYPRESS_CFR2V, 1, buf);
+ CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, addr, 1, buf);
ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
if (ret)
@@ -160,15 +184,41 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
nor->read_dummy = 24;
+ return 0;
+}
+
+static int cypress_nor_set_octal_dtr_bits(struct spi_nor *nor, u64 addr)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+
/* Set the octal and DTR enable bits. */
buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN;
op = (struct spi_mem_op)
- CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
- SPINOR_REG_CYPRESS_CFR5V, 1, buf);
+ CYPRESS_NOR_WR_ANY_REG_OP(nor->params->addr_mode_nbytes,
+ addr, 1, buf);
- ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
- if (ret)
- return ret;
+ return spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+}
+
+static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
+{
+ const struct spi_nor_flash_parameter *params = nor->params;
+ u8 *buf = nor->bouncebuf;
+ u64 addr;
+ int i, ret;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR2;
+ ret = cypress_nor_set_memlat(nor, addr);
+ if (ret)
+ return ret;
+
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
+ ret = cypress_nor_set_octal_dtr_bits(nor, addr);
+ if (ret)
+ return ret;
+ }
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, nor->addr_nbytes, 3, buf,
@@ -184,11 +234,10 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
return 0;
}
-static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
+static int cypress_nor_set_single_spi_bits(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
- int ret;
/*
* The register is 1-byte wide, but 1-byte transactions are not allowed
@@ -198,11 +247,23 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS;
buf[1] = 0;
op = (struct spi_mem_op)
- CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes,
- SPINOR_REG_CYPRESS_CFR5V, 2, buf);
- ret = spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
- if (ret)
- return ret;
+ CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes, addr, 2, buf);
+ return spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
+}
+
+static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
+{
+ const struct spi_nor_flash_parameter *params = nor->params;
+ u8 *buf = nor->bouncebuf;
+ u64 addr;
+ int i, ret;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
+ ret = cypress_nor_set_single_spi_bits(nor, addr);
+ if (ret)
+ return ret;
+ }
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1);
@@ -283,10 +344,6 @@ static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
u8 i;
int ret;
- if (!params->n_dice)
- return cypress_nor_quad_enable_volatile_reg(nor,
- SPINOR_REG_CYPRESS_CFR1V);
-
for (i = 0; i < params->n_dice; i++) {
addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR1;
ret = cypress_nor_quad_enable_volatile_reg(nor, addr);
@@ -408,28 +465,17 @@ static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor)
return 0;
}
-static int cypress_nor_get_page_size_single_chip(struct spi_nor *nor)
-{
- struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
- SPINOR_REG_CYPRESS_CFR3V, 0,
- nor->bouncebuf);
- int ret;
-
- ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3_PGSZ)
- nor->params->page_size = 512;
- else
- nor->params->page_size = 256;
-
- return 0;
-}
-
-
-static int cypress_nor_get_page_size_mcp(struct spi_nor *nor)
+/**
+ * cypress_nor_get_page_size() - Get flash page size configuration.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * The BFPT table advertises a 512B or 256B page size depending on part but the
+ * page size is actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_get_page_size(struct spi_nor *nor)
{
struct spi_mem_op op =
CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
@@ -459,23 +505,6 @@ static int cypress_nor_get_page_size_mcp(struct spi_nor *nor)
return 0;
}
-/**
- * cypress_nor_get_page_size() - Get flash page size configuration.
- * @nor: pointer to a 'struct spi_nor'
- *
- * The BFPT table advertises a 512B or 256B page size depending on part but the
- * page size is actually configurable (with the default being 256B). Read from
- * CFR3V[4] and set the correct size.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int cypress_nor_get_page_size(struct spi_nor *nor)
-{
- if (nor->params->n_dice)
- return cypress_nor_get_page_size_mcp(nor);
- return cypress_nor_get_page_size_single_chip(nor);
-}
-
static void cypress_nor_ecc_init(struct spi_nor *nor)
{
/*
@@ -512,25 +541,39 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
if (nor->bouncebuf[0])
return -ENODEV;
- return cypress_nor_get_page_size(nor);
+ return 0;
}
static int s25fs256t_post_sfdp_fixup(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
+ /*
+ * S25FS256T does not define the SCCR map, but we would like to use the
+ * same code base for both single and multi chip package devices, thus
+ * set the vreg_offset and n_dice to be able to do so.
+ */
+ params->vreg_offset = devm_kmalloc(nor->dev, sizeof(u32), GFP_KERNEL);
+ if (!params->vreg_offset)
+ return -ENOMEM;
+
+ params->vreg_offset[0] = SPINOR_REG_CYPRESS_VREG;
+ params->n_dice = 1;
+
/* PP_1_1_4_4B is supported but missing in 4BAIT. */
params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
SPINOR_OP_PP_1_1_4_4B,
SNOR_PROTO_1_1_4);
- return 0;
+ return cypress_nor_get_page_size(nor);
}
-static void s25fs256t_late_init(struct spi_nor *nor)
+static int s25fs256t_late_init(struct spi_nor *nor)
{
cypress_nor_ecc_init(nor);
+
+ return 0;
}
static struct spi_nor_fixups s25fs256t_fixups = {
@@ -558,10 +601,20 @@ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
{
- struct spi_nor_erase_type *erase_type =
- nor->params->erase_map.erase_type;
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_type *erase_type = params->erase_map.erase_type;
unsigned int i;
+ if (!params->n_dice || !params->vreg_offset) {
+ dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
+ if (params->size == SZ_256M)
+ params->n_dice = 2;
+
/*
* In some parts, 3byte erase opcodes are advertised by 4BAIT.
* Convert them to 4byte erase opcodes.
@@ -579,25 +632,19 @@ static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
}
}
- /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
- if (nor->params->size == SZ_256M)
- nor->params->n_dice = 2;
-
return cypress_nor_get_page_size(nor);
}
-static void s25hx_t_late_init(struct spi_nor *nor)
+static int s25hx_t_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
/* Fast Read 4B requires mode cycles */
params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
-
+ params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
- /* Replace ready() with multi die version */
- if (params->n_dice)
- params->ready = cypress_nor_sr_ready_and_clear;
+ return 0;
}
static struct spi_nor_fixups s25hx_t_fixups = {
@@ -607,7 +654,7 @@ static struct spi_nor_fixups s25hx_t_fixups = {
};
/**
- * cypress_nor_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
+ * cypress_nor_set_octal_dtr() - Enable or disable octal DTR on Cypress flashes.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
*
@@ -616,7 +663,7 @@ static struct spi_nor_fixups s25hx_t_fixups = {
*
* Return: 0 on success, -errno otherwise.
*/
-static int cypress_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int cypress_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
return enable ? cypress_nor_octal_dtr_en(nor) :
cypress_nor_octal_dtr_dis(nor);
@@ -624,22 +671,34 @@ static int cypress_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
static int s28hx_t_post_sfdp_fixup(struct spi_nor *nor)
{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ if (!params->n_dice || !params->vreg_offset) {
+ dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
+ if (params->size == SZ_256M)
+ params->n_dice = 2;
+
/*
* On older versions of the flash the xSPI Profile 1.0 table has the
* 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE.
*/
- if (nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
- nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
+ if (params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
+ params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
SPINOR_OP_CYPRESS_RD_FAST;
/* This flash is also missing the 4-byte Page Program opcode bit. */
- spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP],
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
/*
* Since xSPI Page Program opcode is backward compatible with
* Legacy SPI, use Legacy SPI opcode there as well.
*/
- spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
/*
@@ -647,7 +706,7 @@ static int s28hx_t_post_sfdp_fixup(struct spi_nor *nor)
* address bytes needed for Read Status Register command as 0 but the
* actual value for that is 4.
*/
- nor->params->rdsr_addr_nbytes = 4;
+ params->rdsr_addr_nbytes = 4;
return cypress_nor_get_page_size(nor);
}
@@ -656,19 +715,18 @@ static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
- int ret;
-
- ret = cypress_nor_set_addr_mode_nbytes(nor);
- if (ret)
- return ret;
-
- return 0;
+ return cypress_nor_set_addr_mode_nbytes(nor);
}
-static void s28hx_t_late_init(struct spi_nor *nor)
+static int s28hx_t_late_init(struct spi_nor *nor)
{
- nor->params->octal_dtr_enable = cypress_nor_octal_dtr_enable;
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->set_octal_dtr = cypress_nor_set_octal_dtr;
+ params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
+
+ return 0;
}
static const struct spi_nor_fixups s28hx_t_fixups = {
@@ -792,47 +850,59 @@ static const struct flash_info spansion_nor_parts[] = {
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "s25fs256t", INFO6(0x342b19, 0x0f0890, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25fs256t_fixups },
- { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256)
+ { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 0, 0)
PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
- { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512)
+ { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 0, 0)
PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
{ "s25hl02gt", INFO6(0x342a1c, 0x0f0090, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
FLAGS(NO_CHIP_ERASE)
.fixups = &s25hx_t_fixups },
- { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256)
+ { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 0, 0)
PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
- { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512)
+ { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 0, 0)
PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
{ "s25hs02gt", INFO6(0x342b1c, 0x0f0090, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
FLAGS(NO_CHIP_ERASE)
.fixups = &s25hx_t_fixups },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
- { "s28hl512t", INFO(0x345a1a, 0, 256 * 1024, 256)
+ { "s28hl512t", INFO(0x345a1a, 0, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s28hx_t_fixups,
+ },
+ { "s28hl01gt", INFO(0x345a1b, 0, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
- { "s28hl01gt", INFO(0x345a1b, 0, 256 * 1024, 512)
+ { "s28hs512t", INFO(0x345b1a, 0, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
- { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
+ { "s28hs01gt", INFO(0x345b1b, 0, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
- { "s28hs01gt", INFO(0x345b1b, 0, 256 * 1024, 512)
+ { "s28hs02gt", INFO(0x345b1c, 0, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
};
@@ -876,17 +946,35 @@ static int spansion_nor_sr_ready_and_clear(struct spi_nor *nor)
return !(nor->bouncebuf[0] & SR_WIP);
}
-static void spansion_nor_late_init(struct spi_nor *nor)
+static int spansion_nor_late_init(struct spi_nor *nor)
{
- if (nor->params->size > SZ_16M) {
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spansion_nor_params *priv_params;
+ u8 mfr_flags = nor->info->mfr_flags;
+
+ if (params->size > SZ_16M) {
nor->flags |= SNOR_F_4B_OPCODES;
/* No small sector erase for 4-byte command set */
nor->erase_opcode = SPINOR_OP_SE;
nor->mtd.erasesize = nor->info->sector_size;
}
- if (nor->info->mfr_flags & USE_CLSR)
- nor->params->ready = spansion_nor_sr_ready_and_clear;
+ if (mfr_flags & (USE_CLSR | USE_CLPEF)) {
+ priv_params = devm_kmalloc(nor->dev, sizeof(*priv_params),
+ GFP_KERNEL);
+ if (!priv_params)
+ return -ENOMEM;
+
+ if (mfr_flags & USE_CLSR)
+ priv_params->clsr = SPINOR_OP_CLSR;
+ else if (mfr_flags & USE_CLPEF)
+ priv_params->clsr = SPINOR_OP_CLPEF;
+
+ params->priv = priv_params;
+ params->ready = spansion_nor_sr_ready_and_clear;
+ }
+
+ return 0;
}
static const struct spi_nor_fixups spansion_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 688eb20c763e..197d2c1101ed 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -49,9 +49,11 @@ static const struct spi_nor_locking_ops sst26vf_nor_locking_ops = {
.is_locked = sst26vf_nor_is_locked,
};
-static void sst26vf_nor_late_init(struct spi_nor *nor)
+static int sst26vf_nor_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &sst26vf_nor_locking_ops;
+
+ return 0;
}
static const struct spi_nor_fixups sst26vf_nor_fixups = {
@@ -111,6 +113,10 @@ static const struct flash_info sst_nor_parts[] = {
SPI_NOR_QUAD_READ) },
{ "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "sst26vf032b", INFO(0xbf2642, 0, 0, 0)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ PARSE_SFDP
+ .fixups = &sst26vf_nor_fixups },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
@@ -203,10 +209,12 @@ out:
return ret;
}
-static void sst_nor_late_init(struct spi_nor *nor)
+static int sst_nor_late_init(struct spi_nor *nor)
{
if (nor->info->mfr_flags & SST_WRITE)
nor->mtd._write = sst_nor_write;
+
+ return 0;
}
static const struct spi_nor_fixups sst_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 0ba716e84377..5ab9d5324860 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -214,8 +214,13 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
status_new = (status_old & ~mask & ~tb_mask) | val;
- /* Disallow further writes if WP pin is asserted */
- status_new |= SR_SRWD;
+ /*
+ * Disallow further writes if WP# pin is neither left floating nor
+ * wrongly tied to GND (that includes internal pull-downs).
+ * WP# pin hard strapped to GND can be a valid use case.
+ */
+ if (!(nor->flags & SNOR_F_NO_WP))
+ status_new |= SR_SRWD;
if (!use_top)
status_new |= tb_mask;
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 834d6ba5ce70..cd99c9a1c568 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -120,8 +120,9 @@ static const struct flash_info winbond_nor_parts[] = {
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 0, 0)
+ PARSE_SFDP
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &w25q256_fixups },
@@ -216,7 +217,7 @@ static const struct spi_nor_otp_ops winbond_nor_otp_ops = {
.is_locked = spi_nor_otp_is_locked_sr2,
};
-static void winbond_nor_late_init(struct spi_nor *nor)
+static int winbond_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
@@ -232,6 +233,8 @@ static void winbond_nor_late_init(struct spi_nor *nor)
* from BFPT, if any.
*/
params->set_4byte_addr_mode = winbond_nor_set_4byte_addr_mode;
+
+ return 0;
}
static const struct spi_nor_fixups winbond_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
index 7175de8aa336..00d53eae5ee8 100644
--- a/drivers/mtd/spi-nor/xilinx.c
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -155,10 +155,12 @@ static int xilinx_nor_setup(struct spi_nor *nor,
return 0;
}
-static void xilinx_nor_late_init(struct spi_nor *nor)
+static int xilinx_nor_late_init(struct spi_nor *nor)
{
nor->params->setup = xilinx_nor_setup;
nor->params->ready = xilinx_nor_sr_ready;
+
+ return 0;
}
static const struct spi_nor_fixups xilinx_nor_fixups = {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 8b91a55ec0d2..8ee51e49fced 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -894,6 +894,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
+ /* UBI cannot work on flashes with zero erasesize. */
+ if (!mtd->erasesize) {
+ pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 6673122266b7..42db7679c360 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2335,13 +2335,27 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
- if (dev->chip_id == KSZ8830_CHIP_ID) {
+ switch (dev->chip_id) {
+ case KSZ8830_CHIP_ID:
/* Silicon Errata Sheet (DS80000830A):
* Port 1 does not work with LinkMD Cable-Testing.
* Port 1 does not respond to received PAUSE control frames.
*/
if (!port)
return MICREL_KSZ8_P1_ERRATA;
+ break;
+ case KSZ9477_CHIP_ID:
+ /* KSZ9477 Errata DS80000754C
+ *
+ * Module 4: Energy Efficient Ethernet (EEE) feature select must
+ * be manually disabled
+ * The EEE feature is enabled by default, but it is not fully
+ * operational. It must be manually disabled through register
+ * controls. If not disabled, the PHY ports can auto-negotiate
+ * to enable EEE, and this feature can cause link drops when
+ * linked to another device supporting EEE.
+ */
+ return MICREL_NO_EEE;
}
return 0;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 52a99d8bada0..ab434a77b059 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2958,14 +2958,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
*/
- mv88e6xxx_g1_wait_eeprom_done(chip);
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
- mv88e6xxx_g1_wait_eeprom_done(chip);
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 2fa55a643591..174c773b38c2 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
-{
- const unsigned long timeout = jiffies + 1 * HZ;
- u16 val;
- int err;
-
- /* Wait up to 1 second for the switch to finish reading the
- * EEPROM.
- */
- while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
- if (err) {
- dev_err(chip->dev, "Error reading status");
- return;
- }
-
- /* If the switch is still resetting, it may not
- * respond on the bus, and so MDIO read returns
- * 0xffff. Differentiate between that, and waiting for
- * the EEPROM to be done by bit 0 being set.
- */
- if (val != 0xffff &&
- val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
- return;
-
- usleep_range(1000, 2000);
- }
-
- dev_err(chip->dev, "Timeout waiting for EEPROM done");
-}
-
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index c99ddd117fe6..1095261f5b49 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -282,7 +282,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 937a01f2ba75..b2b5f6ba438f 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 7e091965582b..d9434f7cae53 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -365,6 +365,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index dee35ba924ad..8c66d3bf61f0 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -132,6 +132,8 @@ struct sja1105_info {
int max_frame_mem;
int num_ports;
bool multiple_cascade_ports;
+ /* Every {port, TXQ} has its own CBS shaper */
+ bool fixed_cbs_mapping;
enum dsa_tag_protocol tag_proto;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
@@ -264,6 +266,8 @@ struct sja1105_private {
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ /* Serializes accesses to the FDB */
+ struct mutex fdb_lock;
/* PTP two-step TX timestamp ID, and its serialization lock */
spinlock_t ts_id_lock;
u8 ts_id;
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 7729d3f8b7f5..984c0e604e8d 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
static int
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+ struct sja1105_dyn_cmd cmd = {};
int rc;
- /* We don't _need_ to read the full entry, just the command area which
- * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
- * buffer that contains the full entry too. Additionally, our API
- * doesn't really know how many bytes into the buffer does the command
- * area really begin. So just read back the whole entry.
- */
+ /* Read back the whole entry + command structure. */
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc)
@@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
/* Unpack the command structure, and return it to the caller in case it
* needs to perform further checks on it (VALIDENT).
*/
- memset(cmd, 0, sizeof(*cmd));
- ops->cmd_packing(packed_buf, cmd, UNPACK);
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
/* Hardware hasn't cleared VALID => still working on it */
- return cmd->valid ? -EAGAIN : 0;
+ if (cmd.valid)
+ return -EAGAIN;
+
+ if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+ return -ENOENT;
+
+ if (check_errors && cmd.errors)
+ return -EINVAL;
+
+ /* Don't dereference possibly NULL pointer - maybe caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+
+ return 0;
}
/* Poll the dynamic config entry's control area until the hardware has
@@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
*/
static int
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
- int rc;
-
- return read_poll_timeout(sja1105_dynamic_config_poll_valid,
- rc, rc != -EAGAIN,
- SJA1105_DYNAMIC_CONFIG_SLEEP_US,
- SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
- false, priv, cmd, ops);
+ int err, rc;
+
+ err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
+ rc, rc != -EAGAIN,
+ SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+ SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+ false, priv, ops, entry, check_valident,
+ check_errors);
+ return err < 0 ? err : rc;
}
/* Provides read access to the settings through the dynamic interface
@@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;
- if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
- return -ENOENT;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
- /* Don't dereference possibly NULL pointer - maybe caller
- * only wanted to see whether the entry existed or not.
- */
- if (entry)
- ops->entry_packing(packed_buf, entry, UNPACK);
- return 0;
+ return rc;
}
int sja1105_dynamic_config_write(struct sja1105_private *priv,
@@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;
- cmd = (struct sja1105_dyn_cmd) {0};
- ops->cmd_packing(packed_buf, &cmd, UNPACK);
- if (cmd.errors)
- return -EINVAL;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
- return 0;
+ return rc;
}
static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
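The refactor above folds the VALIDENT and error checks into the poll routine itself, so read_poll_timeout() only has to spin while the poll result is -EAGAIN, and the two callers simply state whether they care about VALIDENT (reads) or the error bits (writes). Below is a minimal userspace sketch of that pattern; poll_once(), do_wait() and the hw_reads_left counter are hypothetical stand-ins, and the retry loop is a plain-C model of what the kernel's read_poll_timeout() helper provides, not the real macro.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical hardware model: VALID stays set for a few reads, then
 * clears; VALIDENT/errors report the final outcome. */
static int hw_reads_left = 3;

static int poll_once(int check_valident, int check_errors)
{
	int valid = (--hw_reads_left > 0);
	int valident = 1, errors = 0;

	if (valid)
		return -EAGAIN;		/* hardware still owns the entry */
	if (check_valident && !valident)
		return -ENOENT;		/* lookup finished, nothing found */
	if (check_errors && errors)
		return -EINVAL;		/* write finished with errors */
	return 0;
}

/* Simplified stand-in for read_poll_timeout(): retry while the poll
 * routine says -EAGAIN, give up after timeout_us. */
static int do_wait(int check_valident, int check_errors,
		   unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;
	int rc;

	while ((rc = poll_once(check_valident, check_errors)) == -EAGAIN) {
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
		waited += sleep_us;
	}
	return rc;
}

int main(void)
{
	/* A read cares about VALIDENT, a write about the error bits. */
	printf("read  -> %d\n", do_wait(1, 0, 10, 1000));
	hw_reads_left = 3;
	printf("write -> %d\n", do_wait(0, 1, 10, 1000));
	return 0;
}

The point of the split is that a timeout from the wait helper and a final -ENOENT/-EINVAL verdict from the poll routine travel back through the same return value, which is how the rewritten sja1105_dynamic_config_wait_complete() combines err and rc.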
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 331bb1c6676a..1a367e64bc3b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1798,6 +1798,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ int rc;
if (!vid) {
switch (db.type) {
@@ -1812,12 +1813,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
}
}
- return priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_lock(&priv->fdb_lock);
+ rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
}
-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
+static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
@@ -1837,6 +1842,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ mutex_lock(&priv->fdb_lock);
+ rc = __sja1105_fdb_del(ds, port, addr, vid, db);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -1868,13 +1887,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
if (!(l2_lookup.destports & BIT(port)))
continue;
- /* We need to hide the FDB entry for unknown multicast */
- if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
- l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
- continue;
-
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ /* Hardware FDB is shared for fdb and mdb; "bridge fdb show"
+ * only wants to see unicast
+ */
+ if (is_multicast_ether_addr(macaddr))
+ continue;
+
/* We need to hide the dsa_8021q VLANs from the user. */
if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
@@ -1898,6 +1918,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
};
int i;
+ mutex_lock(&priv->fdb_lock);
+
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
@@ -1911,7 +1933,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
if (rc) {
dev_err(ds->dev, "Failed to read FDB: %pe\n",
ERR_PTR(rc));
- return;
+ break;
}
if (!(l2_lookup.destports & BIT(port)))
@@ -1923,14 +1945,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+ rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
macaddr, l2_lookup.vlanid, ERR_PTR(rc));
- return;
+ break;
}
}
+
+ mutex_unlock(&priv->fdb_lock);
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
@@ -2115,11 +2139,36 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
}
#define BYTES_PER_KBIT (1000LL / 8)
+/* Port 0 (the uC port) does not have CBS shapers */
+#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+ int port, int prio)
+{
+ int i;
+
+ if (priv->info->fixed_cbs_mapping) {
+ i = SJA1110_FIXED_CBS(port, prio);
+ if (i >= 0 && i < priv->info->num_cbs_shapers)
+ return i;
+
+ return -1;
+ }
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+ return i;
+
+ return -1;
+}
static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
{
int i;
+ if (priv->info->fixed_cbs_mapping)
+ return -1;
+
for (i = 0; i < priv->info->num_cbs_shapers; i++)
if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
return i;
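On SJA1110 the shaper-to-queue mapping is fixed, and SJA1110_FIXED_CBS() is plain linear indexing over the user ports. A quick worked example, assuming eight traffic classes per port behind SJA1105_NUM_TC (an assumption here, not stated in this hunk) and a hypothetical fixed_cbs() helper:

#include <stdio.h>

#define SJA1105_NUM_TC 8	/* assumption: 8 traffic classes per port */

/* Same arithmetic as SJA1110_FIXED_CBS(): port 0 (the uC port) has no
 * shapers, so user port p, queue q owns shaper (p - 1) * 8 + q. */
static int fixed_cbs(int port, int prio)
{
	return (port - 1) * SJA1105_NUM_TC + prio;
}

int main(void)
{
	printf("port 1 prio 0 -> shaper %d\n", fixed_cbs(1, 0));	/* 0 */
	printf("port 3 prio 5 -> shaper %d\n", fixed_cbs(3, 5));	/* 21 */
	return 0;
}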
@@ -2150,14 +2199,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_cbs_entry *cbs;
+ s64 port_transmit_rate_kbps;
int index;
if (!offload->enable)
return sja1105_delete_cbs_shaper(priv, port, offload->queue);
- index = sja1105_find_unused_cbs_shaper(priv);
- if (index < 0)
- return -ENOSPC;
+ /* The user may be replacing an existing shaper */
+ index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+ if (index < 0) {
+ /* That isn't the case - see if we can allocate a new one */
+ index = sja1105_find_unused_cbs_shaper(priv);
+ if (index < 0)
+ return -ENOSPC;
+ }
cbs = &priv->cbs[index];
cbs->port = port;
@@ -2167,9 +2222,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
*/
cbs->credit_hi = offload->hicredit;
cbs->credit_lo = abs(offload->locredit);
- /* User space is in kbits/sec, hardware in bytes/sec */
- cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
- cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
+ /* User space is in kbits/sec, while the hardware works in bytes/sec times
+ * link speed. Since the given offload->sendslope is good only for the
+ * current link speed anyway, and user space is likely to reprogram it
+ * when that changes, don't even bother to track the port's link speed,
+ * but deduce the port transmit rate from idleslope - sendslope.
+ */
+ port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+ cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+ port_transmit_rate_kbps);
+ cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+ port_transmit_rate_kbps);
/* Convert the negative values from 64-bit 2's complement
* to 32-bit 2's complement (for the case of 0x80000000 whose
* negative is still negative).
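The slope conversion above is easier to see with numbers. Using hypothetical tc-cbs parameters for a 100 Mbit/s link (idleslope 20000 kbit/s, sendslope -80000 kbit/s; the figures are illustrative, not from the patch), idleslope - sendslope recovers the 100000 kbit/s port rate, and the programmed values come out scaled by that rate. The standalone C sketch below repeats the arithmetic with ordinary 64-bit division in place of div_s64().

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#define BYTES_PER_KBIT (1000LL / 8)

int main(void)
{
	/* Hypothetical tc-cbs parameters for a 100 Mbit/s link. */
	int64_t idleslope = 20000;	/* kbit/s */
	int64_t sendslope = -80000;	/* kbit/s */

	/* Port rate deduced as in sja1105_setup_tc_cbs(). */
	int64_t rate_kbps = idleslope - sendslope;		/* 100000 */

	int64_t idle = idleslope * BYTES_PER_KBIT / rate_kbps;		/* 25 */
	int64_t send = llabs(sendslope * BYTES_PER_KBIT) / rate_kbps;	/* 100 */

	/* idle + send always equals BYTES_PER_KBIT, i.e. the two slopes
	 * end up as fractions of the port rate, which is why the driver
	 * divides by the deduced rate instead of tracking link speed. */
	printf("idle_slope=%" PRId64 " send_slope=%" PRId64 "\n", idle, send);
	return 0;
}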
@@ -2234,6 +2297,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
int rc, i;
s64 now;
+ mutex_lock(&priv->fdb_lock);
mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@@ -2346,6 +2410,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
goto out;
out:
mutex_unlock(&priv->mgmt_lock);
+ mutex_unlock(&priv->fdb_lock);
return rc;
}
@@ -2915,7 +2980,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
- int match;
+ int match, rc;
+
+ mutex_lock(&priv->fdb_lock);
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
l2_lookup = table->entries;
@@ -2928,7 +2995,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
if (match == table->entry_count) {
NL_SET_ERR_MSG_MOD(extack,
"Could not find FDB entry for unknown multicast");
- return -ENOSPC;
+ rc = -ENOSPC;
+ goto out;
}
if (flags.val & BR_MCAST_FLOOD)
@@ -2936,10 +3004,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
else
l2_lookup[match].destports &= ~BIT(to);
- return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
- l2_lookup[match].index,
- &l2_lookup[match],
- true);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup[match].index,
+ &l2_lookup[match], true);
+out:
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
}
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
@@ -3309,6 +3380,7 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
+ mutex_init(&priv->fdb_lock);
spin_lock_init(&priv->ts_id_lock);
rc = sja1105_parse_dt(priv);
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 5ce29c8057a4..834b5c1b4db0 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 1c009b485188..ca66b747b7c5 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1385,7 +1385,7 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
return -ENOMEM;
other_port = priv->ports[!port_priv->nr];
- port_rules = adin1110_port_rules(port_priv, false, true);
+ port_rules = adin1110_port_rules(other_port, false, true);
eth_broadcast_addr(mask);
return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index ad32ca81f7ef..f955bde10cf9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1833,6 +1833,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
return work_done;
error:
+ if (xdp_flags & ENA_XDP_REDIRECT)
+ xdp_do_flush();
+
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index d63d321f3e7b..41a6098eb0c2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -528,13 +528,16 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
ASP_RX_FILTER_BLK_CTRL);
}
-void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
- u32 *rule_cnt)
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt)
{
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
+ if (j == *rule_cnt)
+ return -EMSGSIZE;
+
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -548,6 +551,8 @@ void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
}
*rule_cnt = j;
+
+ return 0;
}
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
@@ -1300,6 +1305,7 @@ static int bcmasp_probe(struct platform_device *pdev)
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
+ of_node_put(intf_node);
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index 5b512f7f5e94..ec90add6b03e 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -577,8 +577,8 @@ void bcmasp_netfilt_release(struct bcmasp_intf *intf,
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
-void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
- u32 *rule_cnt);
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt);
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index c4f1604d5ab3..ce6a3d56fb23 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -335,7 +335,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
err = bcmasp_flow_get(intf, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
+ err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
cmd->data = NUM_NET_FILTERS;
break;
default:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5cc0dbe12132..7551aa8068f8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2614,6 +2614,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
struct rx_cmp_ext *rxcmp1;
u32 cp_cons, tmp_raw_cons;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 rx_pkts = 0;
u8 event = 0;
@@ -2648,6 +2649,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
} else if (unlikely(TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_DONE)) {
bnxt_hwrm_handler(bp, txcmp);
@@ -2667,6 +2670,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (event & BNXT_AGG_EVENT)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ if (flush_xdp)
+ xdp_do_flush();
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f178ed9899a9..3ae8e8af8ab3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -3721,6 +3721,60 @@ struct hwrm_func_backing_store_qcaps_v2_output {
u8 valid;
};
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
+struct hwrm_func_dbr_pacing_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
+struct hwrm_func_dbr_pacing_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+#define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST \
+ FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST \
+ FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[7];
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 852eb449ccae..6ba2b9398633 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -345,7 +345,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
-
+ edev->bar0 = bp->bar0;
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 80cbc4b6130a..6ff77f082e6c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -81,6 +81,7 @@ struct bnxt_en_dev {
* mode only. Will be
* updated in resume.
*/
+ void __iomem *bar0;
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 31f664ee4d77..b940dcd3ace6 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -756,8 +756,6 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- macb_set_tx_clk(bp, speed);
-
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
@@ -777,6 +775,9 @@ static void macb_mac_link_up(struct phylink_config *config,
spin_unlock_irqrestore(&bp->lock, flags);
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+ macb_set_tx_clk(bp, speed);
+
/* Enable Rx and Tx; Enable PTP unicast */
ctrl = macb_readl(bp, NCR);
if (gem_has_ptp(bp))
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 716815dad7d2..65ec1abc9442 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -300,10 +300,8 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev,
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
- ch->max_rx = adapter->num_rx_queues;
- ch->max_tx = adapter->num_tx_queues;
- ch->rx_count = adapter->num_rx_queues;
- ch->tx_count = adapter->num_tx_queues;
+ ch->max_combined = adapter->num_queues;
+ ch->combined_count = adapter->num_queues;
}
static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index f61bd89734c5..8b992dc9bb52 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -87,8 +87,11 @@ static irqreturn_t tsnep_irq(int irq, void *arg)
/* handle TX/RX queue 0 interrupt */
if ((active & adapter->queue[0].irq_mask) != 0) {
- tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
- napi_schedule(&adapter->queue[0].napi);
+ if (napi_schedule_prep(&adapter->queue[0].napi)) {
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&adapter->queue[0].napi);
+ }
}
return IRQ_HANDLED;
@@ -99,8 +102,11 @@ static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
struct tsnep_queue *queue = arg;
/* handle TX/RX queue interrupt */
- tsnep_disable_irq(queue->adapter, queue->irq_mask);
- napi_schedule(&queue->napi);
+ if (napi_schedule_prep(&queue->napi)) {
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&queue->napi);
+ }
return IRQ_HANDLED;
}
@@ -1728,6 +1734,10 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
if (queue->tx)
complete = tsnep_tx_poll(queue->tx, budget);
+ /* handle case where we are called by netpoll with a budget of 0 */
+ if (unlikely(budget <= 0))
+ return budget;
+
if (queue->rx) {
done = queue->rx->xsk_pool ?
tsnep_rx_poll_zc(queue->rx, napi, budget) :
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index e0a4cb7e3f50..c153dc083aff 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1402,7 +1402,7 @@ static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev)
return;
si = enetc_psi_create(pdev);
- if (si)
+ if (!IS_ERR(si))
enetc_psi_destroy(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF,
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index ea0e38b4d9e9..f281e42a7ef9 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -570,7 +570,10 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (!skb)
return -1;
- skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+ if (rx->ctx.skb_tail == rx->ctx.skb_head)
+ skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
+ else
+ rx->ctx.skb_tail->next = skb;
rx->ctx.skb_tail = skb;
num_frags = 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a4b43bcd2f0c..aaf1f42624a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -814,6 +814,7 @@ struct hnae3_tc_info {
u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
+ bool dcb_ets_active;
};
#define HNAE3_MAX_DSCP 64
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index f276b5ecb431..b8508533878b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1045,6 +1045,7 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ struct net_device *dev = kinfo->netdev;
*pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
*pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
@@ -1087,6 +1088,9 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
dev_specs->mc_mac_size);
*pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
dev_specs->mac_stats_num);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "TX timeout threshold: %d seconds\n",
+ dev->watchdog_timeo / HZ);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
@@ -1411,9 +1415,9 @@ int hns3_dbg_init(struct hnae3_handle *handle)
return 0;
out:
- mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
+ mutex_destroy(&handle->dbgfs_lock);
return ret;
}
@@ -1421,6 +1425,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
{
u32 i;
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
if (handle->dbgfs_buf[i]) {
kvfree(handle->dbgfs_buf[i]);
@@ -1428,8 +1435,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
}
mutex_destroy(&handle->dbgfs_lock);
- debugfs_remove_recursive(handle->hnae3_dbgfs);
- handle->hnae3_dbgfs = NULL;
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index eac2d0573241..cf50368441b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2103,8 +2103,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
+ * is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
hns3_tx_push_bd(ring, num);
- WRITE_ONCE(ring->last_to_use, ring->next_to_use);
return;
}
@@ -2115,6 +2119,11 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
+
if (ring->tqp->mem_base)
hns3_tx_mem_doorbell(ring);
else
@@ -2122,7 +2131,6 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
ring->pending_buf = 0;
- WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
@@ -3308,8 +3316,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
-
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
@@ -3347,6 +3353,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_HW_TC);
netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
+
+ /* The device_version V3 hardware can't offload the checksum for IP in
+ * GRE packets, but can do it for NvGRE. So default to disable the
+ * checksum and GSO offload for GRE.
+ */
+ if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) {
+ netdev->features &= ~NETIF_F_GSO_GRE;
+ netdev->features &= ~NETIF_F_GSO_GRE_CSUM;
+ }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -3563,9 +3578,8 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
int *bytes, int *pkts, int budget)
{
- /* pair with ring->last_to_use update in hns3_tx_doorbell(),
- * smp_store_release() is not used in hns3_tx_doorbell() because
- * the doorbell operation already have the needed barrier operation.
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hns3_tx_doorbell().
*/
int ltu = smp_load_acquire(&ring->last_to_use);
int ntc = ring->next_to_clean;
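The pairing documented in these comments is the standard publish/consume ordering: the TX path writes the descriptors first and only then publishes last_to_use with release semantics, and the reclaim path loads last_to_use with acquire semantics before trusting the BD contents. A minimal userspace analogue using C11 atomics and pthreads is sketched below; it models the ordering idea only and does not use the kernel's smp_store_release()/smp_load_acquire() primitives.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int bd_payload;			/* stands in for the TX descriptor */
static _Atomic int last_to_use;		/* published index */

static void *producer(void *arg)
{
	(void)arg;
	bd_payload = 42;		/* write the descriptor first ... */
	/* ... then publish the new index; release orders the payload
	 * write before the index becomes visible. */
	atomic_store_explicit(&last_to_use, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	/* Acquire pairs with the release above: once the new index is
	 * observed, the descriptor contents are guaranteed visible. */
	while (atomic_load_explicit(&last_to_use, memory_order_acquire) == 0)
		;
	printf("consumed BD payload %d\n", bd_payload);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}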
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 36858a72d771..682239f33082 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -773,7 +773,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_FIBER:
- if (module_type == HNAE3_MODULE_TYPE_CR)
+ if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ cmd->base.port = PORT_OTHER;
+ else if (module_type == HNAE3_MODULE_TYPE_CR)
cmd->base.port = PORT_DA;
else
cmd->base.port = PORT_FIBRE;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index fad5a5ff3cda..b98301e205f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -259,7 +259,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
int ret;
if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ h->kinfo.tc_info.mqprio_active)
return -EINVAL;
ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
@@ -275,10 +275,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
}
hclge_tm_schd_info_update(hdev, num_tc);
- if (num_tc > 1)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+ h->kinfo.tc_info.dcb_ets_active = num_tc > 1;
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -487,7 +484,7 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
struct hclge_vport *vport = hclge_get_vport(h);
struct hclge_dev *hdev = vport->back;
- if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ if (h->kinfo.tc_info.mqprio_active)
return 0;
return hdev->dcbx_cap;
@@ -611,7 +608,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
return -EBUSY;
- if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
+ kinfo = &vport->nic.kinfo;
+ if (kinfo->tc_info.dcb_ets_active)
return -EINVAL;
ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
@@ -625,7 +623,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (ret)
return ret;
- kinfo = &vport->nic.kinfo;
memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
kinfo->tc_info.mqprio_active = tc > 0;
@@ -634,13 +631,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (ret)
goto err_out;
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
- if (tc > 1)
- hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
-
return hclge_notify_init_up(hdev);
err_out:
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index f01a7a9ee02c..ff3f8f424ad9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1519,7 +1519,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
struct hclge_desc desc[3];
int pos = 0;
int ret, i;
- u32 *req;
+ __le32 *req;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
@@ -1544,22 +1544,22 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
- req = (u32 *)req1->tcam_data;
+ req = (__le32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));
/* tcam_data2 ~ tcam_data7 */
- req = (u32 *)req2->tcam_data;
+ req = (__le32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));
/* tcam_data8 ~ tcam_data12 */
- req = (u32 *)req3->tcam_data;
+ req = (__le32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 0f50dba6cc47..c42574e29747 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3564,9 +3564,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
+#define HCLGE_IMP_RESET_DELAY 5
+
switch (event_type) {
case HCLGE_VECTOR0_EVENT_PTP:
case HCLGE_VECTOR0_EVENT_RST:
+ if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
+ mdelay(HCLGE_IMP_RESET_DELAY);
+
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
case HCLGE_VECTOR0_EVENT_MBX:
@@ -7348,6 +7353,12 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
NULL, false);
if (ret) {
+ /* If the tcam config fails, set the rule state to TO_DEL,
+ * so the rule will be deleted when the periodic
+ * task is scheduled.
+ */
+ hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
spin_unlock_bh(&hdev->fd_rule_lock);
return ret;
}
@@ -8824,7 +8835,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport,
if (mac_type == HCLGE_MAC_ADDR_UC) {
if (is_all_added)
vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
- else
+ else if (hclge_is_umv_space_full(vport, true))
vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
} else {
if (is_all_added)
@@ -11026,6 +11037,7 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
static void hclge_info_show(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport->nic;
struct device *dev = &hdev->pdev->dev;
dev_info(dev, "PF info begin:\n");
@@ -11042,9 +11054,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+ handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
dev_info(dev, "MQPRIO %s\n",
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+ handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ec233ec57222..7bc2049b723d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -919,8 +919,6 @@ struct hclge_dev {
#define HCLGE_FLAG_MAIN BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
-#define HCLGE_FLAG_DCB_ENABLE BIT(2)
-#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
u32 flag;
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 7a2f9233d695..a4d68fb216fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1855,7 +1855,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
unsigned long delta = round_jiffies_relative(HZ);
struct hnae3_handle *handle = &hdev->nic;
- if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return;
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 9406237c461e..f81a43d2cdfc 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -456,9 +456,6 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
u16 out_size = sizeof(vlan_filter);
int err;
- if (!hwdev)
- return -EINVAL;
-
vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
vlan_filter.enable = en;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 832a2ae01950..a8d79ee350f8 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1303,24 +1303,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
* the user space for finding a flow. During this process, OVS computes
* checksum on the first packet when CHECKSUM_PARTIAL flag is set.
*
- * So, re-compute TCP pseudo header checksum when configured for
- * trunk mode.
+ * So, re-compute TCP pseudo header checksum.
*/
+
if (iph_proto == IPPROTO_TCP) {
struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
+
if (tcph->check == 0x0000) {
/* Recompute TCP pseudo header checksum */
- if (adapter->is_active_trunk) {
- tcphdrlen = skb->len - iphlen;
- if (skb_proto == ETH_P_IP)
- tcph->check =
- ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, tcphdrlen, iph_proto, 0);
- else if (skb_proto == ETH_P_IPV6)
- tcph->check =
- ~csum_ipv6_magic(&iph6->saddr,
- &iph6->daddr, tcphdrlen, iph_proto, 0);
- }
+ tcphdrlen = skb->len - iphlen;
+ if (skb_proto == ETH_P_IP)
+ tcph->check =
+ ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, tcphdrlen, iph_proto, 0);
+ else if (skb_proto == ETH_P_IPV6)
+ tcph->check =
+ ~csum_ipv6_magic(&iph6->saddr,
+ &iph6->daddr, tcphdrlen, iph_proto, 0);
/* Setup SKB fields for checksum offload */
skb_partial_csum_set(skb, iphlen,
offsetof(struct tcphdr, check));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8ea1a238dcef..d3d6415553ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4475,9 +4475,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
goto error_pvid;
i40e_vlan_stripping_enable(vsi);
- i40e_vc_reset_vf(vf, true);
- /* During reset the VF got a new VSI, so refresh a pointer. */
- vsi = pf->vsi[vf->lan_vsi_idx];
+
/* Locked once because multiple functions below iterate list */
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -4563,6 +4561,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+ i40e_vc_reset_vf(vf, true);
+ /* During reset the VF got a new VSI, so refresh a pointer. */
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
if (ret) {
dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 85fba85fbb23..e110ba346185 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -521,7 +521,7 @@ void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
-void iavf_schedule_request_stats(struct iavf_adapter *adapter);
+void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags);
void iavf_schedule_finish_config(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
void iavf_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index a34303ad057d..90397293525f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -362,7 +362,7 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
unsigned int i;
/* Explicitly request stats refresh */
- iavf_schedule_request_stats(adapter);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 7b300c86ceda..6a2e6d64bc3a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -314,15 +314,13 @@ void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
}
/**
- * iavf_schedule_request_stats - Set the flags and schedule statistics request
+ * iavf_schedule_aq_request - Set the flags and schedule aq request
* @adapter: board private structure
- *
- * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
- * request and refresh ethtool stats
+ * @flags: requested aq flags
**/
-void iavf_schedule_request_stats(struct iavf_adapter *adapter)
+void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
{
- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
+ adapter->aq_required |= flags;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
@@ -823,7 +821,7 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->vlan_filter_list);
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
}
clearout:
@@ -845,7 +843,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
f->state = IAVF_VLAN_REMOVE;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -1421,7 +1419,8 @@ void iavf_down(struct iavf_adapter *adapter)
iavf_clear_fdir_filters(adapter);
iavf_clear_adv_rss_conf(adapter);
- if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
+ if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
+ !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index b03426ac932b..db97353efd06 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2617,12 +2617,14 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- /* Read flexiflag registers to determine whether the
- * corresponding RXDID is configured and supported or not.
- * Since Legacy 16byte descriptor format is not supported,
- * start from Legacy 32byte descriptor.
+ /* RXDIDs supported by DDP package can be read from the register
+ * to get the supported RXDID bitmap. But the legacy 32byte RXDID
+ * is not listed in the DDP package, so add it to the bitmap manually.
+ * Legacy 16byte descriptor is not supported.
*/
- for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);
+
+ for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 015b78144114..a2b759531cb7 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,11 +34,11 @@ struct igb_adapter;
/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
#define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD 80
+#define IGB_MIN_TXD 64
#define IGB_MAX_TXD 4096
#define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
+#define IGB_MIN_RXD 64
#define IGB_MAX_RXD 4096
#define IGB_DEFAULT_ITR 3 /* dynamic */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1ab787ed254d..76b34cee1da3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3827,8 +3827,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
}
/* only call pci_enable_sriov() if no VFs are allocated already */
- if (!old_vfs)
+ if (!old_vfs) {
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+ }
goto out;
@@ -3933,8 +3936,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- /* Virtualization features not supported on i210 family. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ /* Virtualization features not supported on i210 and 82580 family. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
+ (hw->mac.type == e1000_82580))
return;
/* Of the below we really only want the effect of getting
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 57d39ee00b58..7b83678ba83a 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -39,11 +39,11 @@ enum latency_range {
/* Tx/Rx descriptor defines */
#define IGBVF_DEFAULT_TXD 256
#define IGBVF_MAX_TXD 4096
-#define IGBVF_MIN_TXD 80
+#define IGBVF_MIN_TXD 64
#define IGBVF_DEFAULT_RXD 256
#define IGBVF_MAX_RXD 4096
-#define IGBVF_MIN_RXD 80
+#define IGBVF_MIN_RXD 64
#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 8ebe6999a528..f48f82d5e274 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -379,11 +379,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD 256
#define IGC_DEFAULT_TX_WORK 128
-#define IGC_MIN_TXD 80
+#define IGC_MIN_TXD 64
#define IGC_MAX_TXD 4096
#define IGC_DEFAULT_RXD 256
-#define IGC_MIN_RXD 80
+#define IGC_MIN_RXD 64
#define IGC_MAX_RXD 4096
/* Supported Rx Buffer Sizes */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 93bce729be76..7ab6dd58e400 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -868,6 +868,18 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
spin_unlock(&adapter->stats64_lock);
}
+static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter)
+{
+ return (adapter->rx_itr_setting <= 3) ?
+ adapter->rx_itr_setting : adapter->rx_itr_setting >> 2;
+}
+
+static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter)
+{
+ return (adapter->tx_itr_setting <= 3) ?
+ adapter->tx_itr_setting : adapter->tx_itr_setting >> 2;
+}
+
static int igc_ethtool_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
@@ -875,17 +887,8 @@ static int igc_ethtool_get_coalesce(struct net_device *netdev,
{
struct igc_adapter *adapter = netdev_priv(netdev);
- if (adapter->rx_itr_setting <= 3)
- ec->rx_coalesce_usecs = adapter->rx_itr_setting;
- else
- ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
-
- if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) {
- if (adapter->tx_itr_setting <= 3)
- ec->tx_coalesce_usecs = adapter->tx_itr_setting;
- else
- ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
- }
+ ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter);
+ ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter);
return 0;
}
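The two helpers introduced above reuse the existing ITR encoding: settings 0-3 are special modes and are reported unchanged, while larger settings are reported shifted right by two, which suggests (an assumption inferred from the >> 2, not stated in this hunk) that such settings store microseconds << 2. A tiny sketch with a hypothetical previous_coalesce() helper:

#include <stdio.h>

/* Same conversion as igc_ethtool_get_previous_rx_coalesce(): settings
 * 0-3 are special ITR modes, larger values are treated as usecs << 2
 * (assumption based on the >> 2 conversion in the driver). */
static int previous_coalesce(unsigned int itr_setting)
{
	return itr_setting <= 3 ? itr_setting : itr_setting >> 2;
}

int main(void)
{
	printf("%d\n", previous_coalesce(3));	/* special mode -> 3 */
	printf("%d\n", previous_coalesce(400));	/* stored 400 -> 100 usecs */
	return 0;
}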
@@ -910,8 +913,12 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
ec->tx_coalesce_usecs == 2)
return -EINVAL;
- if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+ if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) &&
+ ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs");
return -EINVAL;
+ }
/* If ITR is disabled, disable DMAC */
if (ec->rx_coalesce_usecs == 0) {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 293b45717683..98de34d0ce07 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6491,7 +6491,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
struct igc_ring *ring;
int i, drops;
- if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
+ if (unlikely(!netif_carrier_ok(dev)))
return -ENETDOWN;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 0310af851086..9339edbd9082 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -979,6 +979,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ u32 aflags = adapter->flags;
bool is_l2 = false;
u32 regval;
@@ -996,20 +997,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1023,8 +1024,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -1035,7 +1036,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
if (hw->mac.type >= ixgbe_mac_X550) {
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
break;
}
fallthrough;
@@ -1046,8 +1047,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -1079,8 +1078,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_TSYNCRXCTL_TYPE_ALL |
IXGBE_TSYNCRXCTL_TSIP_UT_EN;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
is_l2 = true;
break;
default:
@@ -1113,6 +1112,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_WRITE_FLUSH(hw);
+ /* configure adapter flags only when HW is actually configured */
+ adapter->flags = aflags;
+
/* clear TX/RX time stamp registers, just to be sure */
ixgbe_ptp_clear_tx_timestamp(adapter);
IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index eb74ccddb440..21c3f9b015c8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5586,6 +5586,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
break;
case ETHTOOL_GRXCLSRLALL:
for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+ if (loc == info->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
+
if (port->rfs_rules[i])
rules[loc++] = i;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 4424de2ffd70..dbc518ff8276 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -734,13 +734,13 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
dma_map_sg_err:
if (si > 0) {
dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
- sglist[0].len[0], DMA_TO_DEVICE);
- sglist[0].len[0] = 0;
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
}
while (si > 1) {
dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
- sglist[si >> 2].len[si & 3], DMA_TO_DEVICE);
- sglist[si >> 2].len[si & 3] = 0;
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
si--;
}
tx_buffer->gather = 0;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index 5a520d37bea0..d0adb82d65c3 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -69,12 +69,12 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
compl_sg++;
dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
- tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
i = 1; /* entry 0 is main skb, unmapped above */
while (frags--) {
dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
- tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
i++;
}
@@ -131,13 +131,13 @@ static void octep_iq_free_pending(struct octep_iq *iq)
dma_unmap_single(iq->dev,
tx_buffer->sglist[0].dma_ptr[0],
- tx_buffer->sglist[0].len[0],
+ tx_buffer->sglist[0].len[3],
DMA_TO_DEVICE);
i = 1; /* entry 0 is main skb, unmapped above */
while (frags--) {
dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
- tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
i++;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 2ef57980eb47..21e75ff9f5e7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -17,7 +17,21 @@
#define TX_BUFTYPE_NET_SG 2
#define NUM_TX_BUFTYPES 3
-/* Hardware format for Scatter/Gather list */
+/* Hardware format for Scatter/Gather list
+ *
+ * 63 48|47 32|31 16|15 0
+ * -----------------------------------------
+ * | Len 0 | Len 1 | Len 2 | Len 3 |
+ * -----------------------------------------
+ * | Ptr 0 |
+ * -----------------------------------------
+ * | Ptr 1 |
+ * -----------------------------------------
+ * | Ptr 2 |
+ * -----------------------------------------
+ * | Ptr 3 |
+ * -----------------------------------------
+ */
struct octep_tx_sglist_desc {
u16 len[4];
dma_addr_t dma_ptr[4];
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index c2f68678e947..23c2f2ed2fb8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -846,6 +846,21 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
+static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
+ u16 *smq, u16 *smq_mask)
+{
+ struct nix_cn10k_aq_enq_req *aq_req;
+
+ if (!is_rvu_otx2(rvu)) {
+ aq_req = (struct nix_cn10k_aq_enq_req *)req;
+ *smq = aq_req->sq.smq;
+ *smq_mask = aq_req->sq_mask.smq;
+ } else {
+ *smq = req->sq.smq;
+ *smq_mask = req->sq_mask.smq;
+ }
+}
+
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
@@ -857,6 +872,7 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
struct rvu_block *block;
struct admin_queue *aq;
struct rvu_pfvf *pfvf;
+ u16 smq, smq_mask;
void *ctx, *mask;
bool ena;
u64 cfg;
@@ -928,13 +944,14 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (rc)
return rc;
+ nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
/* Check if SQ pointed SMQ belongs to this PF/VF or not */
if (req->ctype == NIX_AQ_CTYPE_SQ &&
((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
(req->op == NIX_AQ_INSTOP_WRITE &&
- req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
+ req->sq_mask.ena && req->sq.ena && smq_mask))) {
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
- pcifunc, req->sq.smq))
+ pcifunc, smq))
return NIX_AF_ERR_AQ_ENQUEUE;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 826f691de259..a4a258da8dd5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
}
#define NPA_MAX_BURST 16
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
+ int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
- int num_ptrs = 1;
dma_addr_t bufptr;
+ int num_ptrs = 1;
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
@@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
num_ptrs = 1;
}
}
+ return cnt - cq->pool_ptrs;
}
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index 8ae96815865e..c1861f7de254 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
return weight;
}
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_lmtst_init(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 8511906cb4e2..997fedac3a98 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -574,20 +574,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
- struct refill_work *work;
- struct delayed_work *dwork;
-
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- }
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
return -ENOMEM;
- }
return 0;
}
@@ -1082,39 +1070,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
static void otx2_pool_refill_task(struct work_struct *work)
{
struct otx2_cq_queue *cq;
- struct otx2_pool *rbpool;
struct refill_work *wrk;
- int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
- dma_addr_t bufptr;
+ int qidx;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
qidx = wrk - pfvf->refill_wrk;
cq = &pfvf->qset.cq[qidx];
- rbpool = cq->rbpool;
- free_ptrs = cq->pool_ptrs;
- while (cq->pool_ptrs) {
- if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
- /* Schedule a WQ if we fails to free atleast half of the
- * pointers else enable napi for this RQ.
- */
- if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
- struct delayed_work *dwork;
-
- dwork = &wrk->pool_refill_work;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- } else {
- cq->refill_task_sched = false;
- }
- return;
- }
- pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
- cq->pool_ptrs--;
- }
cq->refill_task_sched = false;
+
+ local_bh_disable();
+ napi_schedule(wrk->napi);
+ local_bh_enable();
}
int otx2_config_nix_queues(struct otx2_nic *pfvf)
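With this rework the delayed work no longer touches the pool at all: it only clears refill_task_sched and re-arms NAPI, and the decision to schedule it moves into the NAPI poll path (see the otx2_txrx.c hunk further down). Restated as a standalone helper, the poll-side handoff looks roughly like the sketch below; it is assembled from the fields this series adds and is not literal driver code.

static void otx2_handle_refill_result(struct otx2_nic *pfvf,
                                      struct otx2_cq_queue *cq,
                                      struct otx2_cq_poll *cq_poll,
                                      struct napi_struct *napi,
                                      int filled_cnt)
{
        struct refill_work *work = &pfvf->refill_wrk[cq->cq_idx];

        if (unlikely(!filled_cnt)) {
                /* Nothing could be refilled: keep the completion interrupt
                 * masked and let the delayed work napi_schedule() us again.
                 */
                if (!cq->refill_task_sched) {
                        work->napi = napi;
                        cq->refill_task_sched = true;
                        schedule_delayed_work(&work->pool_refill_work,
                                              msecs_to_jiffies(100));
                }
        } else {
                /* Buffers were posted; safe to re-enable the interrupt. */
                otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
                             BIT_ULL(0));
        }
}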
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 4c6032ee7800..c04a8ee53a82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -302,6 +302,7 @@ struct flr_work {
struct refill_work {
struct delayed_work pool_refill_work;
struct otx2_nic *pf;
+ struct napi_struct *napi;
};
/* PTPv2 originTimestamp structure */
@@ -370,7 +371,7 @@ struct dev_hw_ops {
int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
- void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+ int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 70b9065f7d10..6daf4d58c25d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1943,6 +1943,10 @@ int otx2_stop(struct net_device *netdev)
netif_tx_disable(netdev);
+ for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+ cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+ devm_kfree(pf->dev, pf->refill_wrk);
+
otx2_free_hw_resources(pf);
otx2_free_cints(pf, pf->hw.cint_cnt);
otx2_disable_napi(pf);
@@ -1950,9 +1954,6 @@ int otx2_stop(struct net_device *netdev)
for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
- for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
- cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
- devm_kfree(pf->dev, pf->refill_wrk);
kfree(qset->sq);
kfree(qset->cq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index e369baf11530..53b2a4ef5298 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -29,7 +29,8 @@
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
- struct otx2_cq_queue *cq);
+ struct otx2_cq_queue *cq,
+ bool *need_xdp_flush);
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
@@ -337,7 +338,7 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct napi_struct *napi,
struct otx2_cq_queue *cq,
- struct nix_cqe_rx_s *cqe)
+ struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
{
struct nix_rx_parse_s *parse = &cqe->parse;
struct nix_rx_sg_s *sg = &cqe->sg;
@@ -353,7 +354,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
if (pfvf->xdp_prog)
- if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
return;
skb = napi_get_frags(napi);
@@ -388,6 +389,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
struct napi_struct *napi,
struct otx2_cq_queue *cq, int budget)
{
+ bool need_xdp_flush = false;
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
@@ -409,13 +411,15 @@ process_cqe:
cq->cq_head++;
cq->cq_head &= (cq->cqe_cnt - 1);
- otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
+ otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
cq->pend_cqe--;
}
+ if (need_xdp_flush)
+ xdp_do_flush();
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
@@ -424,9 +428,10 @@ process_cqe:
return processed_cqe;
}
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
+ int cnt = cq->pool_ptrs;
dma_addr_t bufptr;
while (cq->pool_ptrs) {
@@ -435,6 +440,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
+
+ return cnt - cq->pool_ptrs;
}
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
@@ -521,6 +528,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
+ int filled_cnt = -1;
cq_poll = container_of(napi, struct otx2_cq_poll, napi);
pfvf = (struct otx2_nic *)cq_poll->dev;
@@ -541,7 +549,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
if (rx_cq && rx_cq->pool_ptrs)
- pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+ filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -561,9 +569,25 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
otx2_config_irq_coalescing(pfvf, i);
}
- /* Re-enable interrupts */
- otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
- BIT_ULL(0));
+ if (unlikely(!filled_cnt)) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ } else {
+ /* Re-enable interrupts */
+ otx2_write64(pfvf,
+ NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+ BIT_ULL(0));
+ }
}
return workdone;
}
@@ -1334,7 +1358,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
- struct otx2_cq_queue *cq)
+ struct otx2_cq_queue *cq,
+ bool *need_xdp_flush)
{
unsigned char *hard_start, *data;
int qidx = cq->cq_idx;
@@ -1371,8 +1396,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
DMA_FROM_DEVICE);
- if (!err)
+ if (!err) {
+ *need_xdp_flush = true;
return true;
+ }
put_page(page);
break;
default:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 9e3bfbe5c480..a82ffca8ce1b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -170,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
#endif /* OTX2_TXRX_H */
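The need_xdp_flush plumbing in the otx2_txrx.c hunks above applies the standard XDP_REDIRECT batching rule: xdp_do_flush() must run once, on the same CPU, after the driver finishes its RX loop, rather than once per redirected packet. In outline (a sketch with a stubbed per-CQE helper; only xdp_do_flush() and the otx2 types come from the kernel):

/* Stub standing in for "process one RX CQE, run XDP, set *flush on a
 * successful XDP_REDIRECT"; the real work is done by the handlers above.
 */
static bool ex_rx_poll_one(struct otx2_nic *pfvf, struct napi_struct *napi,
                           struct otx2_cq_queue *cq, bool *flush)
{
        return false;
}

static int ex_rx_napi(struct otx2_nic *pfvf, struct napi_struct *napi,
                      struct otx2_cq_queue *cq, int budget)
{
        bool need_xdp_flush = false;
        int done = 0;

        while (done < budget &&
               ex_rx_poll_one(pfvf, napi, cq, &need_xdp_flush))
                done++;

        if (need_xdp_flush)
                xdp_do_flush();         /* flush all queued redirects at once */

        return done;
}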
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ddec1627f1a7..8d0bacf4e49c 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2195,7 +2195,7 @@ struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t data_addr;
DEFINE_DMA_UNMAP_LEN(data_size);
- dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
+ dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1];
};
enum flow_control {
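ETH_JUMBO_MTU in this driver is 9000, so with 4KB pages the bound above is 2, but with 16KB or 64KB pages ETH_JUMBO_MTU >> PAGE_SHIFT evaluates to 0 and the old declaration produced a zero-length array. The GNU x ?: y form ("use x unless it is zero, then y") keeps at least one slot. A standalone illustration (macro names are made up for the example):

#define EX_JUMBO_MTU    9000
#define EX_FRAG_SLOTS(page_shift)   ((EX_JUMBO_MTU >> (page_shift)) ?: 1)

/* 4KB pages:  EX_FRAG_SLOTS(12) == 2
 * 64KB pages: EX_FRAG_SLOTS(16) == (0 ?: 1) == 1
 */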
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6ad42e3b488f..20afe79f380a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2005,11 +2005,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
+ dma_addr_t dma_addr = DMA_MAPPING_ERROR;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
- dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -2186,7 +2186,8 @@ release_desc:
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
+ likely(dma_addr != DMA_MAPPING_ERROR))
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
ring->calc_idx = idx;
@@ -2994,6 +2995,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
if (mac->hwlro_ip[i]) {
rule_locs[cnt] = i;
cnt++;
@@ -3167,8 +3171,8 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- __napi_schedule(&eth->rx_napi);
mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ __napi_schedule(&eth->rx_napi);
}
return IRQ_HANDLED;
@@ -3180,8 +3184,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
- __napi_schedule(&eth->tx_napi);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ __napi_schedule(&eth->tx_napi);
}
return IRQ_HANDLED;
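Swapping the two calls in both handlers follows the usual NAPI rule: once napi_schedule_prep() succeeds, mask the device interrupt before __napi_schedule(). The usual reasoning is that with threaded NAPI the poller can run on another CPU, complete, and unmask the interrupt before this handler gets to the mask, leaving the IRQ disabled with no poll pending. A generic sketch of the ordering (struct ex_priv and the mask register are placeholders, not the mtk_eth_soc API):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

struct ex_priv {
        struct napi_struct rx_napi;
        void __iomem *irq_mask_reg;     /* hypothetical mask register */
};

static void ex_mask_rx_irq(struct ex_priv *priv)
{
        writel(0, priv->irq_mask_reg);  /* hypothetical: mask RX-done IRQ */
}

static irqreturn_t ex_rx_irq(int irq, void *dev_id)
{
        struct ex_priv *priv = dev_id;

        if (likely(napi_schedule_prep(&priv->rx_napi))) {
                ex_mask_rx_irq(priv);            /* mask first ...      */
                __napi_schedule(&priv->rx_napi); /* ... then schedule   */
        }
        return IRQ_HANDLED;
}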
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index a70a5417c173..a4efbeb16208 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -214,9 +214,11 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dev == eth->netdev[0])
- pse_port = 1;
+ pse_port = PSE_GDM1_PORT;
else if (dev == eth->netdev[1])
- pse_port = 2;
+ pse_port = PSE_GDM2_PORT;
+ else if (dev == eth->netdev[2])
+ pse_port = PSE_GDM3_PORT;
else
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 92d3952dfa8b..feeb41693c17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -17,8 +17,10 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- if (mlx5e_is_eswitch_flow(parse_state->flow))
+ if (mlx5e_is_eswitch_flow(parse_state->flow)) {
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ parse_state->if_count = 0;
+ }
attr->flags |= MLX5_ATTR_FLAG_CT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 291193f7120d..f63402c48028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -294,6 +294,7 @@ parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
+ parse_state->if_count = 0;
esw_attr->out_count++;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 3b272bbf4c53..368a95fa77d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -98,8 +98,10 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
esw_attr->split_count = esw_attr->out_count;
+ parse_state->if_count = 0;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
index ad09a8a5f36e..2d1d4a04501b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -66,6 +66,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
+ parse_state->if_count = 0;
esw_attr->out_count++;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index c8a3eaf189f6..a13c5e707b83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -166,6 +166,7 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
return err;
esw_attr->split_count = esw_attr->out_count;
+ parse_state->if_count = 0;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
index 310b99230760..f17575b09788 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -65,8 +65,10 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ parse_state->if_count = 0;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 318083690fcd..c24828b688ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3936,6 +3936,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
}
i_split = i + 1;
+ parse_state->if_count = 0;
list_add(&attr->list, &flow->attrs);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6cd7d6497e10..d4cde6555063 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1276,12 +1276,19 @@ int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
+ bool pf_needed;
int ret;
+ pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY;
+
/* Enable PF vport */
- ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, enabled_events);
- if (ret)
- return ret;
+ if (pf_needed) {
+ ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
+ enabled_events);
+ if (ret)
+ return ret;
+ }
/* Enable external host PF HCA */
ret = host_pf_enable_hca(esw->dev);
@@ -1317,7 +1324,8 @@ ec_vf_err:
ecpf_err:
host_pf_disable_hca(esw->dev);
pf_hca_err:
- mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+ if (pf_needed)
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1335,7 +1343,10 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
}
host_pf_disable_hca(esw->dev);
- mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY)
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 752fb0dfb111..b296ac52a439 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3216,26 +3216,47 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
esw_acl_ingress_ofld_cleanup(esw, vport);
}
-static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{
- struct mlx5_vport *vport;
+ struct mlx5_vport *uplink, *manager;
+ int ret;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (IS_ERR(uplink))
+ return PTR_ERR(uplink);
+
+ ret = esw_vport_create_offloads_acl_tables(esw, uplink);
+ if (ret)
+ return ret;
+
+ manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+ if (IS_ERR(manager)) {
+ ret = PTR_ERR(manager);
+ goto err_manager;
+ }
- return esw_vport_create_offloads_acl_tables(esw, vport);
+ ret = esw_vport_create_offloads_acl_tables(esw, manager);
+ if (ret)
+ goto err_manager;
+
+ return 0;
+
+err_manager:
+ esw_vport_destroy_offloads_acl_tables(esw, uplink);
+ return ret;
}
-static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- if (IS_ERR(vport))
- return;
+ vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+ if (!IS_ERR(vport))
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
- esw_vport_destroy_offloads_acl_tables(esw, vport);
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (!IS_ERR(vport))
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
}
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
@@ -3280,7 +3301,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.indir = indir;
- err = esw_create_uplink_offloads_acl_tables(esw);
+ err = esw_create_offloads_acl_tables(esw);
if (err)
goto create_acl_err;
@@ -3321,7 +3342,7 @@ create_fdb_err:
create_restore_err:
esw_destroy_offloads_table(esw);
create_offloads_err:
- esw_destroy_uplink_offloads_acl_tables(esw);
+ esw_destroy_offloads_acl_tables(esw);
create_acl_err:
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
@@ -3337,7 +3358,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
- esw_destroy_uplink_offloads_acl_tables(esw);
+ esw_destroy_offloads_acl_tables(esw);
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 329e374b9539..43ba71e82260 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,6 +46,7 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
select FIXED_PHY
select CRC16
select CRC32
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index 300fe1a93dce..ef980e4e5bc2 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -1021,18 +1021,32 @@ static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
if (!newckf)
- return ERR_PTR(-ENOMEM);
+ goto err;
list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
}
list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
if (!newcaf)
- return ERR_PTR(-ENOMEM);
+ goto err;
list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
}
return duprule;
+
+err:
+ list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+
+ list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
+
+ kfree(duprule);
+ return ERR_PTR(-ENOMEM);
}
static void vcap_apply_width(u8 *dst, int width, int bytes)
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index c07f25e791c7..fe4e166de8a0 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -243,10 +243,9 @@ static void vcap_test_api_init(struct vcap_admin *admin)
}
/* Helper function to create a rule of a specific size */
-static struct vcap_rule *
-test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
- u16 priority,
- int id, int size, int expected_addr)
+static void test_vcap_xn_rule_creator(struct kunit *test, int cid,
+ enum vcap_user user, u16 priority,
+ int id, int size, int expected_addr)
{
struct vcap_rule *rule;
struct vcap_rule_internal *ri;
@@ -311,7 +310,7 @@ test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
ret = vcap_add_rule(rule);
KUNIT_EXPECT_EQ(test, 0, ret);
KUNIT_EXPECT_EQ(test, expected_addr, ri->addr);
- return rule;
+ vcap_free_rule(rule);
}
/* Prepare testing rule deletion */
@@ -995,6 +994,16 @@ static void vcap_api_encode_rule_actionset_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]);
}
+static void vcap_free_ckf(struct vcap_rule *rule)
+{
+ struct vcap_client_keyfield *ckf, *next_ckf;
+
+ list_for_each_entry_safe(ckf, next_ckf, &rule->keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+}
+
static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
{
struct vcap_admin admin = {
@@ -1027,6 +1036,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
@@ -1039,6 +1049,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
@@ -1052,6 +1063,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, 0x98765432, 0xff00ffab);
@@ -1064,6 +1076,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x98765432, kf->data.u32.value);
KUNIT_EXPECT_EQ(test, 0xff00ffab, kf->data.u32.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_SIP, &dip);
@@ -1078,6 +1091,18 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, dip.value[idx], kf->data.u128.value[idx]);
for (idx = 0; idx < ARRAY_SIZE(dip.mask); ++idx)
KUNIT_EXPECT_EQ(test, dip.mask[idx], kf->data.u128.mask[idx]);
+ vcap_free_ckf(rule);
+}
+
+static void vcap_free_caf(struct vcap_rule *rule)
+{
+ struct vcap_client_actionfield *caf, *next_caf;
+
+ list_for_each_entry_safe(caf, next_caf,
+ &rule->actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
}
static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
@@ -1105,6 +1130,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
@@ -1116,6 +1142,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x1, af->data.u1.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_ANY);
@@ -1127,6 +1154,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE, 0x98765432);
@@ -1138,6 +1166,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_TYPE, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x98765432, af->data.u32.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_MASK_MODE, 0xaabbccdd);
@@ -1149,6 +1178,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_MASK_MODE, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0xaabbccdd, af->data.u32.value);
+ vcap_free_caf(rule);
}
static void vcap_api_rule_find_keyset_basic_test(struct kunit *test)
@@ -1408,6 +1438,10 @@ static void vcap_api_encode_rule_test(struct kunit *test)
ret = list_empty(&is2_admin.rules);
KUNIT_EXPECT_EQ(test, false, ret);
KUNIT_EXPECT_EQ(test, 0, ret);
+
+ vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
+ rule->cookie, false);
+
vcap_free_rule(rule);
/* Check that the rule has been freed: tricky to access since this
@@ -1418,6 +1452,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, true, ret);
ret = list_empty(&rule->actionfields);
KUNIT_EXPECT_EQ(test, true, ret);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, id);
}
static void vcap_api_set_rule_counter_test(struct kunit *test)
@@ -1561,6 +1597,11 @@ static void vcap_api_rule_insert_in_order_test(struct kunit *test)
test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ vcap_del_rule(&test_vctrl, &test_netdev, 500);
}
static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
@@ -1619,6 +1660,11 @@ static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
++idx;
}
KUNIT_EXPECT_EQ(test, 768, admin.last_used_addr);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
}
static void vcap_api_rule_remove_at_end_test(struct kunit *test)
@@ -1819,6 +1865,9 @@ static void vcap_api_rule_remove_in_front_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, 786, test_init_start);
KUNIT_EXPECT_EQ(test, 8, test_init_count);
KUNIT_EXPECT_EQ(test, 794, admin.last_used_addr);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
}
static struct kunit_case vcap_api_rule_remove_test_cases[] = {
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 4a16ebff3d1d..48ea4aeeea5d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -91,63 +91,137 @@ static unsigned int mana_checksum_info(struct sk_buff *skb)
return 0;
}
+static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
+ int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
+{
+ ash->dma_handle[sg_i] = da;
+ ash->size[sg_i] = sge_len;
+
+ tp->wqe_req.sgl[sg_i].address = da;
+ tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
+ tp->wqe_req.sgl[sg_i].size = sge_len;
+}
+
static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
- struct mana_tx_package *tp)
+ struct mana_tx_package *tp, int gso_hs)
{
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
+ int hsg = 1; /* num of SGEs of linear part */
struct gdma_dev *gd = apc->ac->gdma_dev;
+ int skb_hlen = skb_headlen(skb);
+ int sge0_len, sge1_len = 0;
struct gdma_context *gc;
struct device *dev;
skb_frag_t *frag;
dma_addr_t da;
+ int sg_i;
int i;
gc = gd->gdma_context;
dev = gc->dev;
- da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (gso_hs && gso_hs < skb_hlen) {
+ sge0_len = gso_hs;
+ sge1_len = skb_hlen - gso_hs;
+ } else {
+ sge0_len = skb_hlen;
+ }
+
+ da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, da))
return -ENOMEM;
- ash->dma_handle[0] = da;
- ash->size[0] = skb_headlen(skb);
+ mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
- tp->wqe_req.sgl[0].address = ash->dma_handle[0];
- tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
- tp->wqe_req.sgl[0].size = ash->size[0];
+ if (sge1_len) {
+ sg_i = 1;
+ da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, da))
+ goto frag_err;
+
+ mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
+ hsg = 2;
+ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ sg_i = hsg + i;
+
frag = &skb_shinfo(skb)->frags[i];
da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
-
if (dma_mapping_error(dev, da))
goto frag_err;
- ash->dma_handle[i + 1] = da;
- ash->size[i + 1] = skb_frag_size(frag);
-
- tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
- tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
- tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
+ mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
+ gd->gpa_mkey);
}
return 0;
frag_err:
- for (i = i - 1; i >= 0; i--)
- dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
+ for (i = sg_i - 1; i >= hsg; i--)
+ dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
- dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+ for (i = hsg - 1; i >= 0; i--)
+ dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
+ DMA_TO_DEVICE);
return -ENOMEM;
}
+/* Handle the case when GSO SKB linear length is too large.
+ * MANA NIC requires GSO packets to put only the packet header to SGE0.
+ * So, we need 2 SGEs for the skb linear part which contains more than the
+ * header.
+ * Return a positive value for the number of SGEs, or a negative value
+ * for an error.
+ */
+static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
+ int gso_hs)
+{
+ int num_sge = 1 + skb_shinfo(skb)->nr_frags;
+ int skb_hlen = skb_headlen(skb);
+
+ if (gso_hs < skb_hlen) {
+ num_sge++;
+ } else if (gso_hs > skb_hlen) {
+ if (net_ratelimit())
+ netdev_err(ndev,
+ "TX nonlinear head: hs:%d, skb_hlen:%d\n",
+ gso_hs, skb_hlen);
+
+ return -EINVAL;
+ }
+
+ return num_sge;
+}
+
+/* Get the GSO packet's header size */
+static int mana_get_gso_hs(struct sk_buff *skb)
+{
+ int gso_hs;
+
+ if (skb->encapsulation) {
+ gso_hs = skb_inner_tcp_all_headers(skb);
+ } else {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ gso_hs = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ } else {
+ gso_hs = skb_tcp_all_headers(skb);
+ }
+ }
+
+ return gso_hs;
+}
+
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
struct mana_port_context *apc = netdev_priv(ndev);
+ int gso_hs = 0; /* zero for non-GSO pkts */
u16 txq_idx = skb_get_queue_mapping(skb);
struct gdma_dev *gd = apc->ac->gdma_dev;
bool ipv4 = false, ipv6 = false;
@@ -159,7 +233,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct mana_txq *txq;
struct mana_cq *cq;
int err, len;
- u16 ihs;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -209,19 +282,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pkg.wqe_req.client_data_unit = 0;
pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
- WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
- if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
- pkg.wqe_req.sgl = pkg.sgl_array;
- } else {
- pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
- sizeof(struct gdma_sge),
- GFP_ATOMIC);
- if (!pkg.sgl_ptr)
- goto tx_drop_count;
-
- pkg.wqe_req.sgl = pkg.sgl_ptr;
- }
if (skb->protocol == htons(ETH_P_IP))
ipv4 = true;
@@ -229,6 +289,26 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ipv6 = true;
if (skb_is_gso(skb)) {
+ int num_sge;
+
+ gso_hs = mana_get_gso_hs(skb);
+
+ num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
+ if (num_sge > 0)
+ pkg.wqe_req.num_sge = num_sge;
+ else
+ goto tx_drop_count;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ if (skb->encapsulation) {
+ tx_stats->tso_inner_packets++;
+ tx_stats->tso_inner_bytes += skb->len - gso_hs;
+ } else {
+ tx_stats->tso_packets++;
+ tx_stats->tso_bytes += skb->len - gso_hs;
+ }
+ u64_stats_update_end(&tx_stats->syncp);
+
pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
@@ -252,28 +332,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
}
-
- if (skb->encapsulation) {
- ihs = skb_inner_tcp_all_headers(skb);
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tso_inner_packets++;
- tx_stats->tso_inner_bytes += skb->len - ihs;
- u64_stats_update_end(&tx_stats->syncp);
- } else {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
- ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
- } else {
- ihs = skb_tcp_all_headers(skb);
- if (ipv6_has_hopopt_jumbo(skb))
- ihs -= sizeof(struct hop_jumbo_hdr);
- }
-
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tso_packets++;
- tx_stats->tso_bytes += skb->len - ihs;
- u64_stats_update_end(&tx_stats->syncp);
- }
-
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
csum_type = mana_checksum_info(skb);
@@ -296,11 +354,25 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
} else {
/* Can't do offload of this type of checksum */
if (skb_checksum_help(skb))
- goto free_sgl_ptr;
+ goto tx_drop_count;
}
}
- if (mana_map_skb(skb, apc, &pkg)) {
+ WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
+
+ if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
+ pkg.wqe_req.sgl = pkg.sgl_array;
+ } else {
+ pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
+ sizeof(struct gdma_sge),
+ GFP_ATOMIC);
+ if (!pkg.sgl_ptr)
+ goto tx_drop_count;
+
+ pkg.wqe_req.sgl = pkg.sgl_ptr;
+ }
+
+ if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->mana_map_err++;
u64_stats_update_end(&tx_stats->syncp);
@@ -1258,11 +1330,16 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
struct device *dev = gc->dev;
- int i;
+ int hsg, i;
- dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+ /* Number of SGEs of linear part */
+ hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
- for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ for (i = 0; i < hsg; i++)
+ dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
+ DMA_TO_DEVICE);
+
+ for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
}
@@ -1317,19 +1394,23 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
case CQE_TX_VPORT_DISABLED:
case CQE_TX_VLAN_TAGGING_VIOLATION:
- WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
- cqe_oob->cqe_hdr.cqe_type);
+ if (net_ratelimit())
+ netdev_err(ndev, "TX: CQE error %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
apc->eth_stats.tx_cqe_err++;
break;
default:
- /* If the CQE type is unexpected, log an error, assert,
- * and go through the error path.
+ /* If the CQE type is unknown, log an error,
+ * and still free the SKB, update tail, etc.
*/
- WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
- cqe_oob->cqe_hdr.cqe_type);
+ if (net_ratelimit())
+ netdev_err(ndev, "TX: unknown CQE type %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
apc->eth_stats.tx_cqe_unknown_type++;
- return;
+ break;
}
if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
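For a concrete feel of the header/payload split that mana_map_skb() now performs for GSO packets: the numbers below are illustrative, not taken from the source, but the arithmetic is the one used in the hunks above.

static void mana_sge_split_example(void)
{
        int gso_hs = 54;        /* e.g. Ethernet + IPv4 + TCP headers */
        int skb_hlen = 200;     /* linear part of the skb */
        int sge0_len, sge1_len, hsg;

        sge0_len = gso_hs;              /* 54: SGE0 carries only headers */
        sge1_len = skb_hlen - gso_hs;   /* 146: rest of the linear data */
        hsg = sge1_len ? 2 : 1;         /* SGEs used for the linear part */

        /* wqe_req.num_sge = hsg + nr_frags; on completion, mana_unmap_skb()
         * recovers hsg from skb_is_gso(skb) && skb_headlen(skb) > ash->size[0].
         */
        (void)sge0_len;
        (void)hsg;
}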
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 6aac98bcb9f4..aae4131f146a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -187,6 +187,7 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, void *cb_arg);
+#define IONIC_MAX_BUF_LEN ((u16)-1)
#define IONIC_PAGE_SIZE PAGE_SIZE
#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2)
#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 26798fc635db..44466e8c5d77 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -207,7 +207,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
return NULL;
}
- frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
+ IONIC_PAGE_SIZE - buf_info->page_offset));
len -= frag_len;
dma_sync_single_for_cpu(dev,
@@ -452,7 +453,8 @@ void ionic_rx_fill(struct ionic_queue *q)
/* fill main descriptor - buf[0] */
desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
+ IONIC_PAGE_SIZE - buf_info->page_offset));
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -471,7 +473,9 @@ void ionic_rx_fill(struct ionic_queue *q)
}
sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
+ IONIC_PAGE_SIZE -
+ buf_info->page_offset));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
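IONIC_MAX_BUF_LEN is ((u16)-1), i.e. 65535. The descriptor length fields are __le16, so on a 64KB-page system a whole-page fragment (65536 bytes) cannot be expressed; the extra inner min_t() caps each fragment at what the field can hold and lets the remainder spill into the next SG element. A compact sketch of the clamp (macro and function names are made up; min_t() and the fixed-width types are the kernel's):

#include <linux/minmax.h>
#include <linux/types.h>

#define EX_MAX_BUF_LEN  ((u16)-1)       /* 65535 */
#define EX_PAGE_SIZE    65536           /* the 64KB PAGE_SIZE case */

static u16 ex_frag_len(u16 len, u32 page_offset)
{
        return min_t(u16, len, min_t(u32, EX_MAX_BUF_LEN,
                                     EX_PAGE_SIZE - page_offset));
}
/* Old code: min_t(u16, len, 65536 - 0) wrapped 65536 to 0, giving a
 * zero-length fragment; the clamp keeps it at 65535 instead.
 */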
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 0bfc375161ed..a174c6fc626a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -110,9 +110,9 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
u8 tx_stats_en;
bool main_func_queue;
+ struct qed_ll2_cbs cbs;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
- struct qed_ll2_cbs cbs;
};
extern const struct qed_ll2_ops qed_ll2_ops_pass;
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 6083b1c8e4fb..fc01ad3f340d 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -4,6 +4,7 @@
* Copyright (C) 2022 Renesas Electronics Corporation
*/
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
@@ -799,6 +800,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct rswitch_private *priv;
struct rswitch_device *rdev;
+ unsigned long flags;
int quota = budget;
rdev = netdev_priv(ndev);
@@ -816,10 +818,12 @@ retry:
netif_wake_subqueue(ndev, 0);
- napi_complete(napi);
-
- rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
- rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ if (napi_complete_done(napi, budget - quota)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
out:
return budget - quota;
@@ -835,8 +839,10 @@ static void rswitch_queue_interrupt(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
if (napi_schedule_prep(&rdev->napi)) {
+ spin_lock(&rdev->priv->lock);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ spin_unlock(&rdev->priv->lock);
__napi_schedule(&rdev->napi);
}
}
@@ -1044,7 +1050,7 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
+ MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}
@@ -1440,14 +1446,17 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
static int rswitch_open(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
+ unsigned long flags;
phy_start(ndev->phydev);
napi_enable(&rdev->napi);
netif_start_queue(ndev);
+ spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+ spin_unlock_irqrestore(&rdev->priv->lock, flags);
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
@@ -1461,6 +1470,7 @@ static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ unsigned long flags;
netif_tx_stop_all_queues(ndev);
bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
@@ -1476,8 +1486,10 @@ static int rswitch_stop(struct net_device *ndev)
kfree(ts_info);
}
+ spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ spin_unlock_irqrestore(&rdev->priv->lock, flags);
phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
@@ -1682,6 +1694,12 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index)
etha->index = index;
etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
etha->coma_addr = priv->addr;
+
+ /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2)) - 1.
+ * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply
+ * both the numerator and the denominator by 10.
+ */
+ etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}
static int rswitch_device_alloc(struct rswitch_private *priv, int index)
@@ -1887,6 +1905,11 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ spin_lock_init(&priv->lock);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
attr = soc_device_match(rswitch_soc_no_speed_change);
if (attr)
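The PSMCS value programs the MDC divider: PSMCS = clk / (MDC * 2) - 1 with a 2.5 MHz MDC target, and the code scales numerator and denominator by 10 to stay in integer math. A worked example with a clock value chosen purely for illustration:

static u32 ex_psmcs(unsigned long clk_hz)
{
        /* e.g. clk_hz = 320000000 (320 MHz):
         * 320000000 / 100000 = 3200, 3200 / 50 = 64, 64 - 1 = 63,
         * which matches 320 / (2.5 * 2) - 1 = 63.
         */
        return clk_hz / 100000 / (25 * 2) - 1;
}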
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 54f397effbc6..04f49a7a5843 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -915,6 +915,7 @@ struct rswitch_etha {
bool external_phy;
struct mii_bus *mii;
phy_interface_t phy_interface;
+ u32 psmcs;
u8 mac_addr[MAX_ADDR_LEN];
int link;
int speed;
@@ -1011,6 +1012,9 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+ spinlock_t lock; /* lock interrupt registers' control */
+ struct clk *clk;
+
bool etha_no_runtime_change;
bool gwca_halt;
};
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 2375cef577e4..f77a2d3ef37e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -359,26 +359,36 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
struct efx_nic *efx = channel->efx;
struct efx_rx_buffer *rx_buf =
- efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ efx_rx_buffer(rx_queue, channel->rx_pkt_index);
u8 *eh = efx_rx_buf_va(rx_buf);
/* Read length from the prefix if necessary. This already
* excludes the length of the prefix itself.
*/
- if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) {
rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset));
+ /* A known issue may prevent this being filled in;
+ * if that happens, just drop the packet.
+ * Must do that in the driver since passing a zero-length
+ * packet up to the stack may cause a crash.
+ */
+ if (unlikely(!rx_buf->len)) {
+ efx_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ channel->n_rx_frm_trunc++;
+ goto out;
+ }
+ }
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
- struct efx_rx_queue *rx_queue;
-
efx_loopback_rx_packet(efx, eh, rx_buf->len);
- rx_queue = efx_channel_get_rx_queue(channel);
efx_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags);
goto out;
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 047322b04d4f..834f000ba1c4 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -136,6 +136,8 @@ static struct efx_tc_mac_pedit_action *efx_tc_flower_get_mac(struct efx_nic *efx
if (old) {
/* don't need our new entry */
kfree(ped);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found, ref taken */
@@ -602,6 +604,8 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
kfree(encap);
if (pseudo) /* don't need our new pseudo either */
efx_tc_flower_release_encap_match(efx, pseudo);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return PTR_ERR(old);
/* check old and new em_types are compatible */
switch (old->type) {
case EFX_TC_EM_DIRECT:
@@ -700,6 +704,8 @@ static struct efx_tc_recirc_id *efx_tc_get_recirc_id(struct efx_nic *efx,
if (old) {
/* don't need our new entry */
kfree(rid);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found */
@@ -1482,7 +1488,10 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Ignoring already-offloaded rule (cookie %lx)\n",
tc->cookie);
@@ -1697,7 +1706,10 @@ static int efx_tc_flower_replace_lhs(struct efx_nic *efx,
old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
&rule->linkage,
efx_tc_lhs_rule_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
rc = -EEXIST;
@@ -1858,7 +1870,10 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
index 8e06bfbcbea1..44bb57670340 100644
--- a/drivers/net/ethernet/sfc/tc_conntrack.c
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -298,7 +298,10 @@ static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht,
&conn->linkage,
efx_tc_ct_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded conntrack (cookie %lx)\n", tc->cookie);
rc = -EEXIST;
@@ -482,6 +485,8 @@ struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
if (old) {
/* don't need our new entry */
kfree(ct_zone);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found */
diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c
index 0fafb47ea082..c44088424323 100644
--- a/drivers/net/ethernet/sfc/tc_counters.c
+++ b/drivers/net/ethernet/sfc/tc_counters.c
@@ -236,6 +236,8 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
if (old) {
/* don't need our new entry */
kfree(ctr);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found */
diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c
index 7e8bcdb222ad..87443f9dfd22 100644
--- a/drivers/net/ethernet/sfc/tc_encap_actions.c
+++ b/drivers/net/ethernet/sfc/tc_encap_actions.c
@@ -132,6 +132,8 @@ static int efx_bind_neigh(struct efx_nic *efx,
/* don't need our new entry */
put_net_track(neigh->net, &neigh->ns_tracker);
kfree(neigh);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return PTR_ERR(old);
if (!refcount_inc_not_zero(&old->ref))
return -EAGAIN;
/* existing entry found, ref taken */
@@ -640,6 +642,8 @@ struct efx_tc_encap_action *efx_tc_flower_create_encap_md(
if (old) {
/* don't need our new entry */
kfree(encap);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found, ref taken */
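All of the sfc hunks above fix the same oversight: rhashtable_lookup_get_insert_fast() has three outcomes — NULL (our entry was inserted), a pointer to an existing entry (duplicate key), or ERR_PTR() on internal failure — and the code previously folded the last case into the second. A generic sketch of the corrected pattern; struct ex_entry and the helper are placeholders, only the rhashtable/refcount calls are the kernel's:

#include <linux/err.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct ex_entry {
        struct rhash_head linkage;
        refcount_t ref;
};

static struct ex_entry *ex_get_or_insert(struct rhashtable *ht,
                                         struct ex_entry *new,
                                         struct rhashtable_params params)
{
        struct ex_entry *old;

        old = rhashtable_lookup_get_insert_fast(ht, &new->linkage, params);
        if (IS_ERR(old)) {              /* internal failure, e.g. -ENOMEM */
                kfree(new);
                return ERR_CAST(old);
        }
        if (old) {                      /* duplicate key: reuse the old entry */
                kfree(new);
                if (!refcount_inc_not_zero(&old->ref))
                        return ERR_PTR(-EAGAIN);
                return old;
        }
        refcount_set(&new->ref, 1);     /* NULL: ours is now in the table */
        return new;
}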
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 403cb397d4d3..1e996c29043d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -70,7 +70,7 @@ struct stmmac_txq_stats {
u64 tx_tso_frames;
u64 tx_tso_nfrags;
struct u64_stats_sync syncp;
-};
+} ____cacheline_aligned_in_smp;
struct stmmac_rxq_stats {
u64 rx_bytes;
@@ -79,7 +79,7 @@ struct stmmac_rxq_stats {
u64 rx_normal_irq_n;
u64 napi_poll;
struct u64_stats_sync syncp;
-};
+} ____cacheline_aligned_in_smp;
/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
@@ -202,6 +202,9 @@ struct stmmac_extra_stats {
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
+ /* per queue statistics */
+ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
+ struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_dropped;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 26ea8c687881..a0e276783e65 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -104,6 +104,7 @@ struct stm32_ops {
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
u32 syscfg_eth_mask;
+ bool clk_rx_enable_in_suspend;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
@@ -121,7 +122,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
- if (!dwmac->dev->power.is_suspended) {
+ if (!dwmac->ops->clk_rx_enable_in_suspend ||
+ !dwmac->dev->power.is_suspended) {
ret = clk_prepare_enable(dwmac->clk_rx);
if (ret) {
clk_disable_unprepare(dwmac->clk_tx);
@@ -513,7 +515,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
- .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
+ .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+ .clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 01e77368eef1..465ff1fd4785 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -441,8 +441,8 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
int ret = 0;
u32 v;
@@ -455,9 +455,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
@@ -479,9 +479,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 980e5f8a37ec..9470d3fd2ded 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -171,8 +171,8 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -201,15 +201,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
ret |= handle_tx;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index aaa09b16b016..7907d62d3437 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -162,8 +162,8 @@ static void show_rx_process_state(unsigned int status)
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -215,16 +215,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index fa69d64a8694..3cde695fec91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -337,8 +337,8 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
@@ -367,15 +367,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
ret |= handle_tx;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 3401e888a9f6..cd7a9768de5f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -78,7 +78,6 @@ struct stmmac_tx_queue {
dma_addr_t dma_tx_phy;
dma_addr_t tx_tail_addr;
u32 mss;
- struct stmmac_txq_stats txq_stats;
};
struct stmmac_rx_buffer {
@@ -123,7 +122,6 @@ struct stmmac_rx_queue {
unsigned int len;
unsigned int error;
} state;
- struct stmmac_rxq_stats rxq_stats;
};
struct stmmac_channel {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index b7ac7abecdd3..6aa5c0556d22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -548,14 +548,14 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
pos = data;
for (q = 0; q < tx_cnt; q++) {
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[q];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
struct stmmac_txq_stats snapshot;
data = pos;
do {
- start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
- snapshot = tx_q->txq_stats;
- } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ snapshot = *txq_stats;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
@@ -566,14 +566,14 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
pos = data;
for (q = 0; q < rx_cnt; q++) {
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[q];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
struct stmmac_rxq_stats snapshot;
data = pos;
do {
- start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
- snapshot = rx_q->rxq_stats;
- } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ snapshot = *rxq_stats;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
@@ -637,14 +637,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
pos = j;
for (i = 0; i < rx_queues_count; i++) {
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[i];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
struct stmmac_rxq_stats snapshot;
j = pos;
do {
- start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
- snapshot = rx_q->rxq_stats;
- } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ snapshot = *rxq_stats;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
data[j++] += snapshot.rx_pkt_n;
data[j++] += snapshot.rx_normal_irq_n;
@@ -654,14 +654,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
pos = j;
for (i = 0; i < tx_queues_count; i++) {
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[i];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
struct stmmac_txq_stats snapshot;
j = pos;
do {
- start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
- snapshot = tx_q->txq_stats;
- } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ snapshot = *txq_stats;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
data[j++] += snapshot.tx_pkt_n;
data[j++] += snapshot.tx_normal_irq_n;
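
The ethtool hunks above are the reader side of the same u64_stats scheme: take a snapshot of the whole per-queue struct inside a fetch_begin/fetch_retry loop so a concurrent writer cannot hand back torn 64-bit values on 32-bit machines. A condensed sketch with illustrative names:

#include <linux/u64_stats_sync.h>

struct example_txq_stats {
	u64 tx_pkt_n;
	struct u64_stats_sync syncp;
};

static u64 example_read_tx_pkts(const struct example_txq_stats *s)
{
	unsigned int start;
	u64 val;

	/* Retry the read if a writer raced with us. */
	do {
		start = u64_stats_fetch_begin(&s->syncp);
		val = s->tx_pkt_n;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}
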
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9a3182b9e767..ed1a5a31a491 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2426,6 +2426,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2505,9 +2506,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_set_ic_bit += tx_set_ic_bit;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2547,6 +2548,7 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
u32 tx_packets = 0, tx_errors = 0;
@@ -2704,15 +2706,13 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
- HRTIMER_MODE_REL);
+ stmmac_tx_timer_arm(priv, queue);
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_packets += tx_packets;
- tx_q->txq_stats.tx_pkt_n += tx_packets;
- tx_q->txq_stats.tx_clean++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_packets += tx_packets;
+ txq_stats->tx_pkt_n += tx_packets;
+ txq_stats->tx_clean++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
priv->xstats.tx_errors += tx_errors;
@@ -2995,9 +2995,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ u32 tx_coal_timer = priv->tx_coal_timer[queue];
+
+ if (!tx_coal_timer)
+ return;
hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+ STMMAC_COAL_TIMER(tx_coal_timer),
HRTIMER_MODE_REL);
}
@@ -4112,6 +4116,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
+ struct stmmac_txq_stats *txq_stats;
int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
@@ -4122,6 +4127,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
tx_q = &priv->dma_conf.tx_queue[queue];
+ txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
/* Compute header lengths */
@@ -4280,13 +4286,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_bytes += skb->len;
- tx_q->txq_stats.tx_tso_frames++;
- tx_q->txq_stats.tx_tso_nfrags += nfrags;
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_bytes += skb->len;
+ txq_stats->tx_tso_frames++;
+ txq_stats->tx_tso_nfrags += nfrags;
if (set_ic)
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats->tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4357,6 +4363,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int gso = skb_shinfo(skb)->gso_type;
+ struct stmmac_txq_stats *txq_stats;
struct dma_edesc *tbs_desc = NULL;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
@@ -4366,6 +4373,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
+ txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
@@ -4517,11 +4525,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_bytes += skb->len;
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_bytes += skb->len;
if (set_ic)
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats->tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4728,6 +4736,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
@@ -4787,9 +4796,9 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -4934,7 +4943,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
struct dma_desc *p, struct dma_desc *np,
struct xdp_buff *xdp)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
@@ -4964,10 +4973,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_pkt_n++;
- rx_q->rxq_stats.rx_bytes += len;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->rx_pkt_n++;
+ rxq_stats->rx_bytes += len;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5040,6 +5049,7 @@ static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
@@ -5203,9 +5213,9 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5233,6 +5243,7 @@ read_again:
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
@@ -5494,11 +5505,11 @@ drain_data:
stmmac_rx_refill(priv, queue);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_packets += rx_packets;
- rx_q->rxq_stats.rx_bytes += rx_bytes;
- rx_q->rxq_stats.rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->rx_packets += rx_packets;
+ rxq_stats->rx_bytes += rx_bytes;
+ rxq_stats->rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5511,15 +5522,15 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_rx_queue *rx_q;
+ struct stmmac_rxq_stats *rxq_stats;
u32 chan = ch->index;
unsigned long flags;
int work_done;
- rx_q = &priv->dma_conf.rx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5538,15 +5549,15 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_tx_queue *tx_q;
+ struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
unsigned long flags;
int work_done;
- tx_q = &priv->dma_conf.tx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats = &priv->xstats.txq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
work_done = stmmac_tx_clean(priv, budget, chan);
work_done = min(work_done, budget);
@@ -5568,20 +5579,20 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
int rx_done, tx_done, rxtx_done;
- struct stmmac_rx_queue *rx_q;
- struct stmmac_tx_queue *tx_q;
+ struct stmmac_rxq_stats *rxq_stats;
+ struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
unsigned long flags;
- rx_q = &priv->dma_conf.rx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
- tx_q = &priv->dma_conf.tx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats = &priv->xstats.txq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
tx_done = stmmac_tx_clean(priv, budget, chan);
tx_done = min(tx_done, budget);
@@ -5991,33 +6002,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling receive - used by NETCONSOLE and other diagnostic tools
- * to allow network I/O with interrupts disabled.
- */
-static void stmmac_poll_controller(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int i;
-
- /* If adapter is down, do nothing */
- if (test_bit(STMMAC_DOWN, &priv->state))
- return;
-
- if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
- for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
-
- for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
- } else {
- disable_irq(dev->irq);
- stmmac_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
- }
-}
-#endif
-
/**
* stmmac_ioctl - Entry point for the Ioctl
* @dev: Device pointer.
@@ -6924,7 +6908,7 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
int q;
for (q = 0; q < tx_cnt; q++) {
- struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
u64 tx_packets;
u64 tx_bytes;
@@ -6939,7 +6923,7 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
}
for (q = 0; q < rx_cnt; q++) {
- struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
u64 rx_packets;
u64 rx_bytes;
@@ -6978,9 +6962,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_get_stats64 = stmmac_get_stats64,
.ndo_setup_tc = stmmac_setup_tc,
.ndo_select_queue = stmmac_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = stmmac_poll_controller,
-#endif
.ndo_set_mac_address = stmmac_set_mac_address,
.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
@@ -7340,9 +7321,9 @@ int stmmac_dvr_probe(struct device *device,
priv->dev = ndev;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
+ u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
+ u64_stats_init(&priv->xstats.txq_stats[i].syncp);
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
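
Among the stmmac_main.c changes above, stmmac_tx_timer_arm() now treats a zero per-queue coalesce value as "timer disabled" instead of arming an hrtimer with a zero delay, and stmmac_tx_clean() reuses that helper rather than open-coding hrtimer_start(). A minimal sketch of the guard, with illustrative names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static void example_tx_timer_arm(struct hrtimer *timer, u32 coal_usecs)
{
	/* A zero coalesce setting means: never fire the cleanup timer. */
	if (!coal_usecs)
		return;

	hrtimer_start(timer, ns_to_ktime((u64)coal_usecs * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}
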
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 35f4b1484029..2f0678f15fb7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -419,9 +419,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
- plat->mac_interface = stmmac_of_get_mac_mode(np);
- if (plat->mac_interface < 0)
- plat->mac_interface = plat->phy_interface;
+ rc = stmmac_of_get_mac_mode(np);
+ plat->mac_interface = rc < 0 ? plat->phy_interface : rc;
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
@@ -902,7 +901,7 @@ static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
int ret;
- ret = stmmac_pltfr_init(pdev, priv->plat->bsp_priv);
+ ret = stmmac_pltfr_init(pdev, priv->plat);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 88b5b1b47779..0a3346650e03 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -199,6 +199,7 @@ config TI_ICSSG_PRUETH
config TI_ICSS_IEP
tristate "TI PRU ICSS IEP driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on TI_PRUSS
default TI_PRUSS
help
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index bea6fc0f324c..24120605502f 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1747,9 +1747,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
}
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
- if (tx_chn->irq <= 0) {
+ if (tx_chn->irq < 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
tx_chn->irq);
+ ret = tx_chn->irq;
goto err;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 410612f43cbd..4914d0ef58e9 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -316,12 +316,12 @@ static int prueth_init_tx_chns(struct prueth_emac *emac)
goto fail;
}
- tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
- if (tx_chn->irq <= 0) {
- ret = -EINVAL;
+ ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (ret < 0) {
netdev_err(ndev, "failed to get tx irq\n");
goto fail;
}
+ tx_chn->irq = ret;
snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
dev_name(dev), tx_chn->id);
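
Both TI hunks above fix the same pattern around k3_udma_glue_tx_get_irq(): a negative return is an errno that has to be propagated, not stored as if it were a valid IRQ (and zero is no longer rejected). A generic sketch of the safer shape; example_chan and get_some_irq() are stand-ins, not real APIs:

struct example_chan {
	int irq;
};

static int get_some_irq(struct example_chan *chn);	/* stand-in getter */

static int example_request_tx_irq(struct example_chan *chn)
{
	int ret = get_some_irq(chn);

	if (ret < 0)
		return ret;	/* propagate the errno instead of storing it */

	chn->irq = ret;
	return 0;
}
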
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index c3f30663070f..b7e151439c48 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1330,8 +1330,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (IS_ERR(tfm))
return tfm;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b6d7981b2d1e..927d3d54658e 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1800,9 +1800,6 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
/* Transmit waveform amplitude can be improved (1000BASE-T, 100BASE-TX, 10BASE-Te) */
{0x1c, 0x04, 0x00d0},
- /* Energy Efficient Ethernet (EEE) feature select must be manually disabled */
- {0x07, 0x3c, 0x0000},
-
/* Register settings are required to meet data sheet supply current specifications */
{0x1c, 0x13, 0x6eff},
{0x1c, 0x14, 0xe6ff},
@@ -1847,6 +1844,12 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
+ /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
+ * in this switch shall be regarded as broken.
+ */
+ if (phydev->dev_flags & MICREL_NO_EEE)
+ phydev->eee_broken_modes = -1;
+
err = genphy_restart_aneg(phydev);
if (err)
return err;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e8b94580194e..508d9a392ab1 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2115,7 +2115,12 @@ static const struct ethtool_ops team_ethtool_ops = {
static void team_setup_by_port(struct net_device *dev,
struct net_device *port_dev)
{
- dev->header_ops = port_dev->header_ops;
+ struct team *team = netdev_priv(dev);
+
+ if (port_dev->type == ARPHRD_ETHER)
+ dev->header_ops = team->header_ops_cache;
+ else
+ dev->header_ops = port_dev->header_ops;
dev->type = port_dev->type;
dev->hard_header_len = port_dev->hard_header_len;
dev->needed_headroom = port_dev->needed_headroom;
@@ -2162,8 +2167,11 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
+ team->header_ops_cache = dev->header_ops;
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c
index 0c1e8970ee58..0a53ec293d04 100644
--- a/drivers/net/thunderbolt/main.c
+++ b/drivers/net/thunderbolt/main.c
@@ -1049,12 +1049,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0,
ip_hdr(skb)->protocol, 0);
- } else if (skb_is_gso_v6(skb)) {
+ } else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
- return false;
} else if (protocol == htons(ETH_P_IPV6)) {
tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 332c853ca99b..0c13d9950cd8 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2636,6 +2636,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
struct r8152 *tp = container_of(napi, struct r8152, napi);
int work_done;
+ if (!budget)
+ return 0;
+
work_done = rx_bottom(tp, budget);
if (work_done < budget) {
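
The r8152 hunk above handles the budget == 0 case that netpoll can pass to a NAPI handler: the poll routine must do no RX work and must not complete NAPI. A minimal sketch of that convention; example_rx() is a stand-in:

#include <linux/netdevice.h>

static int example_rx(struct napi_struct *napi, int budget);	/* stand-in */

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	/* budget == 0 (e.g. netpoll): no RX processing, no napi_complete. */
	if (!budget)
		return 0;

	work_done = example_rx(napi, budget);
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}
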
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 5d6454fedb3f..78ad2da3ee29 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 4)) {
+ ret = ret < 0 ? ret : -ENODATA;
+
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
return ret;
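
The smsc75xx hunk above stops treating a short USB control transfer as success: anything under the 4 bytes requested is mapped to an error (-ENODATA when the transfer itself did not fail) so callers never consume a partially filled register value. A condensed sketch; xfer() is a stand-in for the USB read helper:

#include <linux/errno.h>
#include <linux/types.h>

static int example_read_reg32(int (*xfer)(void *buf, int len), u32 *val)
{
	int ret = xfer(val, 4);	/* returns bytes transferred or -errno */

	if (ret < 4)
		return ret < 0 ? ret : -ENODATA;

	return 0;
}
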
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index d43e62ebc2fc..0deefd1573cf 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -344,6 +344,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct veth_rq *rq = NULL;
+ int ret = NETDEV_TX_OK;
struct net_device *rcv;
int length = skb->len;
bool use_napi = false;
@@ -378,11 +379,12 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
drop:
atomic64_inc(&priv->dropped);
+ ret = NET_XMIT_DROP;
}
rcu_read_unlock();
- return NETDEV_TX_OK;
+ return ret;
}
static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
@@ -1444,6 +1446,8 @@ static int veth_open(struct net_device *dev)
netif_carrier_on(peer);
}
+ veth_set_xdp_features(dev);
+
return 0;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 231ad91a919d..fe7f314d65c9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -132,6 +132,14 @@ struct virtnet_interrupt_coalesce {
u32 max_usecs;
};
+/* The dma information of pages allocated at a time. */
+struct virtnet_rq_dma {
+ dma_addr_t addr;
+ u32 ref;
+ u16 len;
+ u16 need_sync;
+};
+
/* Internal representation of a send virtqueue */
struct send_queue {
/* Virtqueue associated with this send _queue */
@@ -185,6 +193,12 @@ struct receive_queue {
char name[16];
struct xdp_rxq_info xdp_rxq;
+
+ /* Record the last dma info to free after new pages is allocated. */
+ struct virtnet_rq_dma *last_dma;
+
+ /* Do dma by self */
+ bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -580,6 +594,156 @@ ok:
return skb;
}
+static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
+{
+ struct page *page = virt_to_head_page(buf);
+ struct virtnet_rq_dma *dma;
+ void *head;
+ int offset;
+
+ head = page_address(page);
+
+ dma = head;
+
+ --dma->ref;
+
+ if (dma->ref) {
+ if (dma->need_sync && len) {
+ offset = buf - (head + sizeof(*dma));
+
+ virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
+ len, DMA_FROM_DEVICE);
+ }
+
+ return;
+ }
+
+ virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(page);
+}
+
+static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
+{
+ void *buf;
+
+ buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
+ if (buf && rq->do_dma)
+ virtnet_rq_unmap(rq, buf, *len);
+
+ return buf;
+}
+
+static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
+{
+ void *buf;
+
+ buf = virtqueue_detach_unused_buf(rq->vq);
+ if (buf && rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
+
+ return buf;
+}
+
+static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
+{
+ struct virtnet_rq_dma *dma;
+ dma_addr_t addr;
+ u32 offset;
+ void *head;
+
+ if (!rq->do_dma) {
+ sg_init_one(rq->sg, buf, len);
+ return;
+ }
+
+ head = page_address(rq->alloc_frag.page);
+
+ offset = buf - head;
+
+ dma = head;
+
+ addr = dma->addr - sizeof(*dma) + offset;
+
+ sg_init_table(rq->sg, 1);
+ rq->sg[0].dma_address = addr;
+ rq->sg[0].length = len;
+}
+
+static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
+{
+ struct page_frag *alloc_frag = &rq->alloc_frag;
+ struct virtnet_rq_dma *dma;
+ void *buf, *head;
+ dma_addr_t addr;
+
+ if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
+ return NULL;
+
+ head = page_address(alloc_frag->page);
+
+ if (rq->do_dma) {
+ dma = head;
+
+ /* new pages */
+ if (!alloc_frag->offset) {
+ if (rq->last_dma) {
+ /* Now, the new page is allocated, the last dma
+ * will not be used. So the dma can be unmapped
+ * if the ref is 0.
+ */
+ virtnet_rq_unmap(rq, rq->last_dma, 0);
+ rq->last_dma = NULL;
+ }
+
+ dma->len = alloc_frag->size - sizeof(*dma);
+
+ addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_dma_mapping_error(rq->vq, addr))
+ return NULL;
+
+ dma->addr = addr;
+ dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+
+ /* Add a reference to dma to prevent the entire dma from
+ * being released during error handling. This reference
+ * will be freed after the pages are no longer used.
+ */
+ get_page(alloc_frag->page);
+ dma->ref = 1;
+ alloc_frag->offset = sizeof(*dma);
+
+ rq->last_dma = dma;
+ }
+
+ ++dma->ref;
+ }
+
+ buf = head + alloc_frag->offset;
+
+ get_page(alloc_frag->page);
+ alloc_frag->offset += size;
+
+ return buf;
+}
+
+static void virtnet_rq_set_premapped(struct virtnet_info *vi)
+{
+ int i;
+
+ /* disable for big mode */
+ if (!vi->mergeable_rx_bufs && vi->big_packets)
+ return;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (virtqueue_set_dma_premapped(vi->rq[i].vq))
+ continue;
+
+ vi->rq[i].do_dma = true;
+ }
+}
+
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
unsigned int len;
@@ -935,7 +1099,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
void *buf;
int off;
- buf = virtqueue_get_buf(rq->vq, &buflen);
+ buf = virtnet_rq_get_buf(rq, &buflen, NULL);
if (unlikely(!buf))
goto err_buf;
@@ -1155,7 +1319,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
int len;
while (num_buf-- > 1) {
- buf = virtqueue_get_buf(rq->vq, &len);
+ buf = virtnet_rq_get_buf(rq, &len, NULL);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
@@ -1263,7 +1427,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
return -EINVAL;
while (--*num_buf > 0) {
- buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+ buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, *num_buf,
@@ -1492,7 +1656,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
while (--num_buf) {
int num_skb_frags;
- buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+ buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, num_buf,
@@ -1651,7 +1815,6 @@ frame_err:
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
gfp_t gfp)
{
- struct page_frag *alloc_frag = &rq->alloc_frag;
char *buf;
unsigned int xdp_headroom = virtnet_get_headroom(vi);
void *ctx = (void *)(unsigned long)xdp_headroom;
@@ -1660,17 +1823,21 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
len = SKB_DATA_ALIGN(len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
+
+ buf = virtnet_rq_alloc(rq, len, gfp);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- get_page(alloc_frag->page);
- alloc_frag->offset += len;
- sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
- vi->hdr_len + GOOD_PACKET_LEN);
+ virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
+ vi->hdr_len + GOOD_PACKET_LEN);
+
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
- if (err < 0)
+ if (err < 0) {
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
+ }
+
return err;
}
@@ -1747,23 +1914,22 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
unsigned int headroom = virtnet_get_headroom(vi);
unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
- char *buf;
+ unsigned int len, hole;
void *ctx;
+ char *buf;
int err;
- unsigned int len, hole;
/* Extra tailroom is needed to satisfy XDP's assumption. This
* means rx frags coalescing won't work, but consider we've
* disabled GSO for XDP, it won't be a big issue.
*/
len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
- if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
+
+ buf = virtnet_rq_alloc(rq, len + room, gfp);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
buf += headroom; /* advance address leaving hole at front of pkt */
- get_page(alloc_frag->page);
- alloc_frag->offset += len + room;
hole = alloc_frag->size - alloc_frag->offset;
if (hole < len + room) {
/* To avoid internal fragmentation, if there is very likely not
@@ -1777,11 +1943,15 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
alloc_frag->offset += hole;
}
- sg_init_one(rq->sg, buf, len);
+ virtnet_rq_init_one_sg(rq, buf, len);
+
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
- if (err < 0)
+ if (err < 0) {
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
+ }
return err;
}
@@ -1902,13 +2072,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
void *ctx;
while (stats.packets < budget &&
- (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
+ (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
stats.packets++;
}
} else {
while (stats.packets < budget &&
- (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
stats.packets++;
}
@@ -3808,8 +3978,11 @@ static void free_receive_page_frags(struct virtnet_info *vi)
{
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
- if (vi->rq[i].alloc_frag.page)
+ if (vi->rq[i].alloc_frag.page) {
+ if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+ virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
+ }
}
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
@@ -3846,9 +4019,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
}
for (i = 0; i < vi->max_queue_pairs; i++) {
- struct virtqueue *vq = vi->rq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
- virtnet_rq_free_unused_buf(vq, buf);
+ struct receive_queue *rq = &vi->rq[i];
+
+ while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
+ virtnet_rq_free_unused_buf(rq->vq, buf);
cond_resched();
}
}
@@ -4022,6 +4196,8 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
+ virtnet_rq_set_premapped(vi);
+
cpus_read_lock();
virtnet_set_affinity(vi);
cpus_read_unlock();
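
The virtio_net changes above teach the RX path to premap its page-frag pages: a small struct virtnet_rq_dma lives at the head of each page, the page is DMA-mapped once, every buffer carved out of it takes a reference, and the mapping is only torn down (and the page released) when the last outstanding buffer comes back. A heavily simplified sketch of that bookkeeping, not the driver's real layout or helpers:

#include <linux/types.h>

struct example_rq_dma {
	dma_addr_t addr;	/* mapping of the page payload after this header */
	u32 ref;		/* buffers still outstanding from this page */
	u16 len;
	u16 need_sync;
};

/* Allocation side: one ref per buffer handed out from the mapped page. */
static void *example_take_buf(struct example_rq_dma *dma, void *head,
			      unsigned int *offset, u32 size)
{
	void *buf = head + *offset;

	dma->ref++;
	*offset += size;
	return buf;
}

/* Completion side: returns true when the caller should unmap dma->addr
 * and drop the final page reference.
 */
static bool example_put_buf(struct example_rq_dma *dma)
{
	return --dma->ref == 0;
}
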
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index e463f59e95c2..5b5597073b00 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -4331,6 +4331,10 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LOCALBYPASS */
+ nla_total_size(0) + /* IFLA_VXLAN_GBP */
+ nla_total_size(0) + /* IFLA_VXLAN_GPE */
+ nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
0;
}
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 47c2ad7a3e42..fd50bb313b92 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -34,6 +34,8 @@
#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
+static int uhdlc_close(struct net_device *dev);
+
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
.tsa = 0,
@@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev)
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = hdlc->priv;
struct ucc_tdm *utdm = priv->utdm;
+ int rc = 0;
if (priv->hdlc_busy != 1) {
if (request_irq(priv->ut_info->uf_info.irq,
@@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev)
napi_enable(&priv->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
- hdlc_open(dev);
+
+ rc = hdlc_open(dev);
+ if (rc)
+ uhdlc_close(dev);
}
- return 0;
+ return rc;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
@@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev)
netdev_reset_queue(dev);
priv->hdlc_busy = 0;
+ hdlc_close(dev);
+
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index bece26741d3a..611d1a6aabb9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -442,7 +442,12 @@ struct brcmf_scan_params_v2_le {
* fixed parameter portion is assumed, otherwise
* ssid in the fixed portion is ignored
*/
- __le16 channel_list[1]; /* list of chanspecs */
+ union {
+ __le16 padding; /* Reserve space for at least 1 entry for abort
+ * which uses an on stack brcmf_scan_params_v2_le
+ */
+ DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */
+ };
};
struct brcmf_scan_results {
@@ -702,7 +707,7 @@ struct brcmf_sta_info_le {
struct brcmf_chanspec_list {
__le32 count; /* # of entries */
- __le32 element[1]; /* variable length uint32 list */
+ __le32 element[]; /* variable length uint32 list */
};
/*
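
The fwil_types.h change above converts two one-element "fake" flexible arrays into real ones; for the scan params the new flexible array sits in a union with a __le16 padding field so that an on-stack instance (used for scan abort) still reserves room for one chanspec entry. A standalone sketch of the idiom, with illustrative names:

#include <linux/stddef.h>
#include <linux/types.h>

struct example_scan_params {
	__le32 count;
	union {
		__le16 padding;				/* keeps room for 1 entry on-stack */
		DECLARE_FLEX_ARRAY(__le16, channel_list);	/* real variable-length tail */
	};
};

/* Heap users size the trailing array explicitly, e.g.:
 *	p = kzalloc(struct_size(p, channel_list, n_channels), GFP_KERNEL);
 */
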
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index f5e08988dc7b..06d6f7f66430 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -310,9 +310,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
- __le32 internal_base_addr;
- __le64 dram_base_addr;
- __le32 page_num;
+ __le32 internal_base_addr __packed;
+ __le64 dram_base_addr __packed;
+ __le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
struct iwl_cmd_header fw_pkt_hdr;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1f5db65a088d..1d5ee4330f29 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -802,7 +802,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
- (void *)((u8 *)mvm->nvm_data->channels + 1);
+ (void *)(mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}
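
The fw.c one-liner above is a pointer-arithmetic fix: "channels + 1" advances past one whole channel entry, whereas the old "(u8 *)channels + 1" advanced by a single byte and placed the bitrates pointer inside the channel it was supposed to follow. A standalone illustration (demo types only):

struct demo_channel {
	int flags;
	int hw_value;
};

static void demo(struct demo_channel *channels)
{
	void *after_elem = channels + 1;		   /* +sizeof(struct demo_channel) */
	void *after_byte = (unsigned char *)channels + 1;  /* +1 byte: the old bug */

	(void)after_elem;
	(void)after_byte;
}
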
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 8b6c641772ee..b719843e9457 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -731,73 +731,78 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
mvmvif->associated = vif->cfg.assoc;
- if (!(changes & BSS_CHANGED_ASSOC))
- return;
-
- if (vif->cfg.assoc) {
- /* clear statistics to get clean beacon counter */
- iwl_mvm_request_statistics(mvm, true);
- iwl_mvm_sf_update(mvm, vif, false);
- iwl_mvm_power_vif_assoc(mvm, vif);
-
- for_each_mvm_vif_valid_link(mvmvif, i) {
- memset(&mvmvif->link[i]->beacon_stats, 0,
- sizeof(mvmvif->link[i]->beacon_stats));
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ /* clear statistics to get clean beacon counter */
+ iwl_mvm_request_statistics(mvm, true);
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ memset(&mvmvif->link[i]->beacon_stats, 0,
+ sizeof(mvmvif->link[i]->beacon_stats));
+
+ if (vif->p2p) {
+ iwl_mvm_update_smps(mvm, vif,
+ IWL_MVM_SMPS_REQ_PROT,
+ IEEE80211_SMPS_DYNAMIC, i);
+ }
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[i]);
+ if (link_conf && !link_conf->dtim_period)
+ protect = true;
+ rcu_read_unlock();
+ }
- if (vif->p2p) {
- iwl_mvm_update_smps(mvm, vif,
- IWL_MVM_SMPS_REQ_PROT,
- IEEE80211_SMPS_DYNAMIC, i);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ protect) {
+ /* If we're not restarting and still haven't
+ * heard a beacon (dtim period unknown) then
+ * make sure we still have enough minimum time
+ * remaining in the time event, since the auth
+ * might actually have taken quite a while
+ * (especially for SAE) and so the remaining
+ * time could be small without us having heard
+ * a beacon yet.
+ */
+ iwl_mvm_protect_assoc(mvm, vif, 0);
}
- rcu_read_lock();
- link_conf = rcu_dereference(vif->link_conf[i]);
- if (link_conf && !link_conf->dtim_period)
- protect = true;
- rcu_read_unlock();
- }
+ iwl_mvm_sf_update(mvm, vif, false);
+
+ /* FIXME: need to decide about misbehaving AP handling */
+ iwl_mvm_power_vif_assoc(mvm, vif);
+ } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
+ iwl_mvm_mei_host_disassociated(mvm);
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- protect) {
- /* If we're not restarting and still haven't
- * heard a beacon (dtim period unknown) then
- * make sure we still have enough minimum time
- * remaining in the time event, since the auth
- * might actually have taken quite a while
- * (especially for SAE) and so the remaining
- * time could be small without us having heard
- * a beacon yet.
+ /* If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
*/
- iwl_mvm_protect_assoc(mvm, vif, 0);
+ ret = iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ONCE(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status),
+ "Failed to update SF upon disassociation\n");
+
+ /* If we get an assert during the connection (after the
+ * station has been added, but before the vif is set
+ * to associated), mac80211 will re-add the station and
+ * then configure the vif. Since the vif is not
+ * associated, we would remove the station here and
+ * this would fail the recovery.
+ */
+ iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
}
- iwl_mvm_sf_update(mvm, vif, false);
-
- /* FIXME: need to decide about misbehaving AP handling */
- iwl_mvm_power_vif_assoc(mvm, vif);
- } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
- iwl_mvm_mei_host_disassociated(mvm);
-
- /* If update fails - SF might be running in associated
- * mode while disassociated - which is forbidden.
- */
- ret = iwl_mvm_sf_update(mvm, vif, false);
- WARN_ONCE(ret &&
- !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status),
- "Failed to update SF upon disassociation\n");
-
- /* If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
+ iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
}
- iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ if (changes & BSS_CHANGED_PS) {
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
}
static void
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index c1d9ce753468..3cbe2c0b8d6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2342,7 +2342,7 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
- if (version < 12) {
+ if (version < 16) {
gp->scan_start_mac_or_link_id = scan_vif->id;
} else {
struct iwl_mvm_vif_link_info *link_info;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 36d70d589aed..898dca393643 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1612,6 +1612,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
memset(&info->status, 0, sizeof(info->status));
+ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
/* inform mac80211 about what happened with the frame */
switch (status & TX_STATUS_MSK) {
@@ -1964,6 +1965,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
*/
if (!is_flush)
info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 391793a16adc..10690e82358b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -918,9 +918,17 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
- while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+ while (tlv_buf_left > sizeof(*tlv_rxba)) {
tlv_type = le16_to_cpu(tlv_rxba->header.type);
tlv_len = le16_to_cpu(tlv_rxba->header.len);
+ if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(tlv_rxba->header), tlv_len),
+ tlv_buf_left);
+ return;
+ }
+
if (tlv_type != TLV_TYPE_RXBA_SYNC) {
mwifiex_dbg(priv->adapter, ERROR,
"Wrong TLV id=0x%x\n", tlv_type);
@@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
+ if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(*tlv_rxba), tlv_bitmap_len),
+ tlv_buf_left);
+ return;
+ }
+
mwifiex_dbg(priv->adapter, INFO,
"%pM tid=%d seq_num=%d bitmap_len=%d\n",
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
@@ -965,8 +981,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
- tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
- tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+ tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
+ tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}
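
The rxreorder hunks above harden a TLV walk: every length read out of the event buffer is validated with overflow-safe size_add() against what is actually left, both for the TLV payload and for the bitmap it implies, and the cursor now advances by header + payload rather than by a full struct plus payload. A condensed sketch of that shape, with simplified types:

#include <asm/byteorder.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct example_tlv {
	__le16 type;
	__le16 len;
	u8 data[];
} __packed;

static void example_walk_tlvs(const u8 *buf, size_t buf_left)
{
	const struct example_tlv *tlv = (const void *)buf;

	while (buf_left > sizeof(*tlv)) {
		size_t len = le16_to_cpu(tlv->len);

		/* size_add() saturates instead of wrapping on overflow. */
		if (size_add(sizeof(*tlv), len) > buf_left)
			return;		/* malformed TLV: stop parsing */

		/* ... consume len bytes at tlv->data ... */

		buf_left -= sizeof(*tlv) + len;
		tlv = (const void *)((const u8 *)tlv + sizeof(*tlv) + len);
	}
}
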
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index f2168fac95ed..8e6db904e5b2 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -779,7 +779,7 @@ struct mwifiex_ie_types_rxba_sync {
u8 reserved;
__le16 seq_num;
__le16 bitmap_len;
- u8 bitmap[1];
+ u8 bitmap[];
} __packed;
struct chan_band_param_set {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 65420ad67416..257737137cd7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
- if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
+ if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
+ rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
@@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return -1;
}
- if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
- sizeof(bridge_tunnel_header))) ||
- (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
- sizeof(rfc1042_header)) &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
+ if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
+ ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 05d9ab3ce819..dc8f4e157eb2 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -93,13 +93,13 @@ __mt76_get_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = NULL;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
if (!list_empty(&dev->rxwi_cache)) {
t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
list);
list_del(&t->list);
}
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
return t;
}
@@ -145,9 +145,9 @@ mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
if (!t)
return;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
list_add(&t->list, &dev->rxwi_cache);
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);
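
The mt76 hunk above switches the rxwi cache lock to the _bh variants: the same lock can also be taken from softirq context, so a plain spin_lock() in process context could deadlock if that softirq fired on the same CPU while the lock was held. A minimal sketch of the rule, with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_cache {
	spinlock_t lock;	/* also taken from BH context */
	struct list_head list;
};

static void example_put(struct example_cache *c, struct list_head *item)
{
	spin_lock_bh(&c->lock);		/* not spin_lock(): BH users exist */
	list_add(item, &c->list);
	spin_unlock_bh(&c->lock);
}
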
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index 0acabba2d1a5..5d402cf2951c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
- u16 val;
u8 lna;
- val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
- *lna_2g = 0;
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
- memset(lna_5g, 0, sizeof(s8) * 3);
-
if (chan->band == NL80211_BAND_2GHZ)
lna = *lna_2g;
else if (chan->hw_value <= 64)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index d5809408d1d3..8c01855885ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
- u8 lna;
+ bool use_lna;
+ u8 lna = 0;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
- lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (chan->band == NL80211_BAND_2GHZ)
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+ else
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+
+ if (use_lna)
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
index 3642a2c7f80c..2434e2480cbe 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -46,6 +46,7 @@ struct rtw8723du_efuse {
u8 vender_id[2]; /* 0x100 */
u8 product_id[2]; /* 0x102 */
u8 usb_option; /* 0x104 */
+ u8 res5[2]; /* 0x105 */
u8 mac_addr[ETH_ALEN]; /* 0x107 */
};
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index dca25a0c2f33..3ae4b41c59ac 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -336,6 +336,7 @@ MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id acpi_id[] = {
{ "NXP1001" },
+ { "NXP1002" },
{ "NXP7471" },
{ }
};
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 4940b6301d83..d687e8c2cc78 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -941,13 +941,10 @@ static void ndev_init_debugfs(struct amd_ntb_dev *ndev)
ndev->debugfs_dir =
debugfs_create_dir(pci_name(ndev->ntb.pdev),
debugfs_dir);
- if (IS_ERR(ndev->debugfs_dir))
- ndev->debugfs_info = NULL;
- else
- ndev->debugfs_info =
- debugfs_create_file("info", S_IRUSR,
- ndev->debugfs_dir, ndev,
- &amd_ntb_debugfs_info);
+ ndev->debugfs_info =
+ debugfs_create_file("info", S_IRUSR,
+ ndev->debugfs_dir, ndev,
+ &amd_ntb_debugfs_info);
}
}
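Both debugfs cleanups in this area lean on the same API contract: debugfs_create_dir() returns either a valid dentry or an ERR_PTR, never NULL, and the debugfs_create_*() helpers accept an error-pointer parent and quietly do nothing with it, so callers normally should not check the result at all; when a check is wanted (as ntb_perf does a few hunks below), it has to be IS_ERR() rather than a NULL test. A minimal sketch of the intended usage, with made-up driver names:

#include <linux/debugfs.h>

static struct dentry *mydrv_dir;        /* hypothetical driver state */

static void mydrv_setup_debugfs(const char *name, void *priv,
                                const struct file_operations *fops)
{
        /* Returns a dentry or an ERR_PTR -- never NULL. */
        mydrv_dir = debugfs_create_dir(name, NULL);

        /*
         * No error check needed: debugfs_create_file() copes with an
         * ERR_PTR parent and simply does not create anything.
         */
        debugfs_create_file("info", 0400, mydrv_dir, priv, fops);
}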
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 2abd2235bbca..f9e7847a378e 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -909,7 +909,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
return 0;
}
-static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp)
{
qp->link_is_up = false;
qp->active = false;
@@ -932,6 +932,13 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
qp->tx_async = 0;
}
+static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
+{
+ ntb_qp_link_context_reset(qp);
+ if (qp->remote_rx_info)
+ qp->remote_rx_info->entry = qp->rx_max_entry - 1;
+}
+
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
struct ntb_transport_ctx *nt = qp->transport;
@@ -1174,7 +1181,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->ndev = nt->ndev;
qp->client_ready = false;
qp->event_handler = NULL;
- ntb_qp_link_down_reset(qp);
+ ntb_qp_link_context_reset(qp);
if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
@@ -1894,7 +1901,7 @@ err:
static int ntb_process_tx(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry)
{
- if (qp->tx_index == qp->remote_rx_info->entry) {
+ if (!ntb_transport_tx_free_entry(qp)) {
qp->tx_ring_full++;
return -EAGAIN;
}
@@ -2276,9 +2283,13 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
struct ntb_queue_entry *entry;
int rc;
- if (!qp || !qp->link_is_up || !len)
+ if (!qp || !len)
return -EINVAL;
+ /* If the qp link is down already, just ignore. */
+ if (!qp->link_is_up)
+ return 0;
+
entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
if (!entry) {
qp->tx_err_no_buf++;
@@ -2418,7 +2429,7 @@ unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
unsigned int head = qp->tx_index;
unsigned int tail = qp->remote_rx_info->entry;
- return tail > head ? tail - head : qp->tx_max_entry + tail - head;
+ return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
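The ntb_transport hunks are one logical fix: ntb_qp_link_down_reset() now also re-publishes remote_rx_info->entry (the ring tail seen by the transmitter) as rx_max_entry - 1, ntb_process_tx() asks ntb_transport_tx_free_entry() instead of comparing indices by hand, and that helper treats tail == head as a full ring (0 free) rather than wrapping to tx_max_entry, matching the old explicit tx_index == entry full-ring test. A self-contained model of the arithmetic, with an illustrative ring size:

#include <assert.h>
#include <stdio.h>

#define TX_MAX_ENTRY    8       /* ring size, illustrative */

/*
 * Free slots between the producer index (head) and the consumer-published
 * index (tail).  head == tail means the producer has caught up: the ring
 * is full, so ">=" must yield 0 instead of wrapping to TX_MAX_ENTRY.
 */
static unsigned int tx_free_entries(unsigned int head, unsigned int tail)
{
        return tail >= head ? tail - head : TX_MAX_ENTRY + tail - head;
}

int main(void)
{
        /* Fresh link: head reset to 0, tail published as TX_MAX_ENTRY - 1. */
        assert(tx_free_entries(0, TX_MAX_ENTRY - 1) == TX_MAX_ENTRY - 1);

        /* Producer has consumed everything the peer released: full ring. */
        assert(tx_free_entries(5, 5) == 0);

        /* Wrapped case. */
        assert(tx_free_entries(6, 2) == TX_MAX_ENTRY + 2 - 6);

        printf("ring arithmetic ok\n");
        return 0;
}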
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 65e1e5cf1b29..553f1f46bc66 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -1355,7 +1355,7 @@ static void perf_setup_dbgfs(struct perf_ctx *perf)
struct pci_dev *pdev = perf->ntb->pdev;
perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir);
- if (!perf->dbgfs_dir) {
+ if (IS_ERR(perf->dbgfs_dir)) {
dev_warn(&perf->ntb->dev, "DebugFS unsupported\n");
return;
}
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index eeeb4b1c97d2..641cb7e05a47 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -370,16 +370,9 @@ static ssize_t tool_fn_write(struct tool_ctx *tc,
if (*offp)
return 0;
- buf = kmalloc(size + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, ubuf, size)) {
- kfree(buf);
- return -EFAULT;
- }
-
- buf[size] = 0;
+ buf = memdup_user_nul(ubuf, size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
n = sscanf(buf, "%c %lli", &cmd, &bits);
@@ -1495,8 +1488,6 @@ static void tool_setup_dbgfs(struct tool_ctx *tc)
tc->dbgfs_dir = debugfs_create_dir(dev_name(&tc->ntb->dev),
tool_dbgfs_topdir);
- if (!tc->dbgfs_dir)
- return;
debugfs_create_file("port", 0600, tc->dbgfs_dir,
tc, &tool_port_fops);
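memdup_user_nul() folds the kmalloc() + copy_from_user() + NUL-terminate + error-unwind sequence into a single call that returns either a kernel copy of the user buffer with a trailing '\0' or an ERR_PTR. A schematic write handler using it; the function and variable names are placeholders, not ntb_tool's.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t mydrv_cmd_write(struct file *file, const char __user *ubuf,
                               size_t size, loff_t *offp)
{
        char *buf;
        int val;

        /* One helper instead of kmalloc() + copy_from_user() + buf[size] = 0. */
        buf = memdup_user_nul(ubuf, size);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        if (sscanf(buf, "%d", &val) == 1)
                pr_info("got %d\n", val);

        kfree(buf);
        return size;
}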
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f3a01b79148c..21783aa2ee8e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2245,25 +2245,8 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
else
ctrl->ctrl_config = NVME_CC_CSS_NVM;
- if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
- u32 crto;
-
- ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
- if (ret) {
- dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
- ret);
- return ret;
- }
-
- if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
- ctrl->ctrl_config |= NVME_CC_CRIME;
- timeout = NVME_CRTO_CRIMT(crto);
- } else {
- timeout = NVME_CRTO_CRWMT(crto);
- }
- } else {
- timeout = NVME_CAP_TIMEOUT(ctrl->cap);
- }
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
+ ctrl->ctrl_config |= NVME_CC_CRIME;
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
@@ -2277,6 +2260,39 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
if (ret)
return ret;
+ /* CAP value may change after initial CC write */
+ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (ret)
+ return ret;
+
+ timeout = NVME_CAP_TIMEOUT(ctrl->cap);
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
+ u32 crto, ready_timeout;
+
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
+ if (ret) {
+ dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * CRTO should always be greater than or equal to CAP.TO, but some
+ * devices are known to get this wrong. Use the larger of the
+ * two values.
+ */
+ if (ctrl->ctrl_config & NVME_CC_CRIME)
+ ready_timeout = NVME_CRTO_CRIMT(crto);
+ else
+ ready_timeout = NVME_CRTO_CRWMT(crto);
+
+ if (ready_timeout < timeout)
+ dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
+ crto, ctrl->cap);
+ else
+ timeout = ready_timeout;
+ }
+
ctrl->ctrl_config |= NVME_CC_ENABLE;
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
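The restructured nvme_enable_ctrl() sequence re-reads CAP after the first CC write (the added comment notes its value may change at that point), then picks the larger of CAP.TO and the relevant CRTO field, CRIMT when CC.CRIME is enabled and CRWMT otherwise, warning once about controllers whose CRTO is smaller than CAP.TO. A user-space model of that selection; the field-extraction macros below are simplified stand-ins for the spec-defined layouts.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified field extraction; see the NVMe spec / nvme.h for the real macros. */
#define CAP_TIMEOUT(cap)        (((cap) >> 24) & 0xff)  /* CAP.TO, 500 ms units */
#define CRTO_CRIMT(crto)        (((crto) >> 16) & 0xffff)
#define CRTO_CRWMT(crto)        ((crto) & 0xffff)

static uint32_t ready_timeout(uint64_t cap, uint32_t crto, bool crime,
                              bool crms_supported)
{
        uint32_t timeout = CAP_TIMEOUT(cap);
        uint32_t crto_timeout;

        if (!crms_supported)
                return timeout;

        crto_timeout = crime ? CRTO_CRIMT(crto) : CRTO_CRWMT(crto);
        if (crto_timeout < timeout) {
                fprintf(stderr, "bad crto:%x cap:%llx\n",
                        crto, (unsigned long long)cap);
                return timeout;         /* keep the larger CAP.TO */
        }

        return crto_timeout;
}

int main(void)
{
        /* CAP.TO = 30 (15 s), CRTO.CRWMT = 10: buggy device, fall back to CAP.TO. */
        printf("%u\n", ready_timeout((uint64_t)30 << 24, 10, false, true));
        return 0;
}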
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 1cd2bf82319a..a15b37750d6e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1924,7 +1924,7 @@ char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
struct request *rq = op->rq;
- if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
return NULL;
return blkcg_get_fc_appid(rq->bio);
}
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 316f3e4ca7cc..8df73a0b3980 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -187,7 +187,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
return 0;
}
-static const struct hwmon_channel_info *nvme_hwmon_info[] = {
+static const struct hwmon_channel_info *const nvme_hwmon_info[] = {
HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2f57da12d983..347cb5daebc3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2916,9 +2916,6 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
struct nvme_dev *dev;
int ret = -ENOMEM;
- if (node == NUMA_NO_NODE)
- set_dev_node(&pdev->dev, first_memory_node);
-
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 868aa4de2e4c..cd92d7ddf5ed 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -348,7 +348,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
while (length) {
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
- bvec_set_page(iov, sg_page(sg), sg->length,
+ bvec_set_page(iov, sg_page(sg), iov_len,
sg->offset + sg_offset);
length -= iov_len;
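The nvmet-tcp one-liner fixes the length passed to bvec_set_page(): each iovec element must cover iov_len, the smaller of the bytes still to transfer and what remains of the current scatterlist entry, not the whole sg->length, which over-describes partially used segments. The chunking idea in a small self-contained form (segment sizes are illustrative):

#include <stdio.h>

#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/* Split `length` bytes across segments of size `seg_len`, starting at
 * `seg_off` inside the first segment -- the same min() per chunk as the
 * nvmet-tcp loop uses. */
static void build_chunks(unsigned int length, unsigned int seg_len,
                         unsigned int seg_off)
{
        while (length) {
                unsigned int chunk = MIN(length, seg_len - seg_off);

                printf("chunk of %u bytes at offset %u\n", chunk, seg_off);
                length -= chunk;        /* chunk, not seg_len, is the element size */
                seg_off = 0;            /* later segments start at offset 0 */
        }
}

int main(void)
{
        build_chunks(10000, 4096, 512); /* 3584 + 4096 + 2320 */
        return 0;
}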
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 0a3483e247a8..f63250c650ca 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -890,13 +890,13 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
{
struct of_changeset_entry *ce;
+ if (WARN_ON(action >= ARRAY_SIZE(action_names)))
+ return -EINVAL;
+
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
if (!ce)
return -ENOMEM;
- if (WARN_ON(action >= ARRAY_SIZE(action_names)))
- return -EINVAL;
-
/* get a reference to the node */
ce->action = action;
ce->np = of_node_get(np);
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index dfb6fb962fc7..a9a292d6d59b 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -45,8 +45,8 @@ struct target {
/**
* struct fragment - info about fragment nodes in overlay expanded device tree
- * @target: target of the overlay operation
* @overlay: pointer to the __overlay__ node
+ * @target: target of the overlay operation
*/
struct fragment {
struct device_node *overlay;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 509a4072d50a..9ce0d20a6c58 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -214,7 +214,7 @@ struct ioa_registers {
struct ioc {
struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */
u8 *res_map; /* resource map, bit == pdir entry */
- u64 *pdir_base; /* physical base address */
+ __le64 *pdir_base; /* physical base address */
u32 pdir_size; /* bytes, function of IOV Space size */
u32 res_hint; /* next available IOVP -
circular search */
@@ -339,7 +339,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
BUG_ON(pages_needed == 0);
BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
- DBG_RES("%s() size: %d pages_needed %d\n",
+ DBG_RES("%s() size: %zu pages_needed %d\n",
__func__, size, pages_needed);
/*
@@ -427,7 +427,7 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
BUG_ON(pages_mapped > BITS_PER_LONG);
- DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
+ DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
__func__, res_idx, pages_mapped);
#ifdef CCIO_COLLECT_STATS
@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
* index are bits 12:19 of the value returned by LCI.
*/
static void
-ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
+ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hints)
{
register unsigned long pa;
@@ -719,7 +719,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
- u64 *pdir_start;
+ __le64 *pdir_start;
unsigned long hint = hint_lookup[(int)direction];
BUG_ON(!dev);
@@ -746,8 +746,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
pdir_start = &(ioc->pdir_base[idx]);
- DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
- __func__, addr, (long)iovp | offset, size);
+ DBG_RUN("%s() %px -> %#lx size: %zu\n",
+ __func__, addr, (long)(iovp | offset), size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
@@ -805,7 +805,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
return;
}
- DBG_RUN("%s() iovp 0x%lx/%x\n",
+ DBG_RUN("%s() iovp %#lx/%zx\n",
__func__, (long)iova, size);
iova ^= offset; /* clear offset bits */
@@ -1283,7 +1283,7 @@ ccio_ioc_init(struct ioc *ioc)
iova_space_size>>20,
iov_order + PAGE_SHIFT);
- ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
+ ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size));
if(NULL == ioc->pdir_base) {
panic("%s() could not allocate I/O Page Table\n", __func__);
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 0905be256de0..c43f1a212a5c 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -14,13 +14,13 @@
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long hint,
- void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
+ void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
unsigned long))
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
unsigned int n_mappings = 0;
unsigned long dma_offset = 0, dma_len = 0;
- u64 *pdirp = NULL;
+ __le64 *pdirp = NULL;
/* Horrible hack. For efficiency's sake, dma_sg starts one
* entry below the true start (it is immediately incremented
@@ -31,8 +31,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long vaddr;
long size;
- DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
- (unsigned long)sg_dma_address(startsg), cnt,
+ DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
+ (unsigned long)sg_dma_address(startsg),
sg_virt(startsg), startsg->length
);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index a7df764f1a72..a4011461189b 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -202,9 +202,9 @@ static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 va
static DEFINE_SPINLOCK(iosapic_lock);
-static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
+static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data)
{
- __raw_writel(data, addr);
+ __raw_writel((__force u32)data, addr);
}
/*
diff --git a/drivers/parisc/iosapic_private.h b/drivers/parisc/iosapic_private.h
index 73ecc657ad95..bd8ff40162b4 100644
--- a/drivers/parisc/iosapic_private.h
+++ b/drivers/parisc/iosapic_private.h
@@ -118,8 +118,8 @@ struct iosapic_irt {
struct vector_info {
struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */
struct irt_entry *irte; /* IRT entry */
- u32 __iomem *eoi_addr; /* precalculate EOI reg address */
- u32 eoi_data; /* IA64: ? PA: swapped txn_data */
+ __le32 __iomem *eoi_addr; /* precalculate EOI reg address */
+ __le32 eoi_data; /* IA64: ? PA: swapped txn_data */
int txn_irq; /* virtual IRQ number for processor */
ulong txn_addr; /* IA64: id_eid PA: partial HPA */
u32 txn_data; /* CPU interrupt bit */
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index f6b510675318..05e7103d1d40 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -46,8 +46,6 @@
#include <linux/module.h>
#include <asm/ropes.h>
-#include <asm/mckinley.h> /* for proc_mckinley_root */
-#include <asm/runway.h> /* for proc_runway_root */
#include <asm/page.h> /* for PAGE0 */
#include <asm/pdc.h> /* for PDC_MODEL_* */
#include <asm/pdcpat.h> /* for is_pdc_pat() */
@@ -122,7 +120,7 @@ MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif
static struct proc_dir_entry *proc_runway_root __ro_after_init;
-struct proc_dir_entry *proc_mckinley_root __ro_after_init;
+static struct proc_dir_entry *proc_mckinley_root __ro_after_init;
/************************************
** SBA register read and write support
@@ -204,7 +202,7 @@ static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
/* start printing from lowest pde in rval */
- u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
+ __le64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
uint rcnt;
@@ -571,7 +569,7 @@ typedef unsigned long space_t;
*/
static void
-sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
+sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hint)
{
u64 pa; /* physical address */
@@ -615,7 +613,7 @@ static void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
u32 iovp = (u32) SBA_IOVP(ioc,iova);
- u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
+ __le64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
#ifdef ASSERT_PDIR_SANITY
/* Assert first pdir entry is set.
@@ -716,7 +714,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
unsigned long flags;
dma_addr_t iovp;
dma_addr_t offset;
- u64 *pdir_start;
+ __le64 *pdir_start;
int pide;
ioc = GET_IOC(dev);
@@ -1434,7 +1432,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
- DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
+ DBG_INIT("%s() hpa %px mem %ldMB IOV %dMB (%d bits)\n",
__func__,
ioc->ioc_hpa,
(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
@@ -1471,7 +1469,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
- DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
+ DBG_INIT("%s() IOV base %#lx mask %#0lx\n",
__func__, ioc->ibase, ioc->imask);
/*
@@ -1583,7 +1581,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
if (!IS_PLUTO(sba_dev->dev)) {
ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
- DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
+ DBG_INIT("%s() hpa %px ioc_ctl 0x%Lx ->",
__func__, sba_dev->sba_hpa, ioc_ctl);
ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
@@ -1668,14 +1666,14 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
/* flush out the last writes */
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
- DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
+ DBG_INIT(" ioc[%d] ROPE_CFG %#lx ROPE_DBG %lx\n",
i,
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
);
- DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
- READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
+ DBG_INIT(" STATUS_CONTROL %#lx FLUSH_CTRL %#lx\n",
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
+ (unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
);
if (IS_PLUTO(sba_dev->dev)) {
@@ -1739,7 +1737,7 @@ sba_common_init(struct sba_device *sba_dev)
#ifdef ASSERT_PDIR_SANITY
/* Mark first bit busy - ie no IOVA 0 */
sba_dev->ioc[i].res_map[0] = 0x80;
- sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
+ sba_dev->ioc[i].pdir_base[0] = (__force __le64) 0xeeffc0addbba0080ULL;
#endif
/* Third (and last) part of PIRANHA BUG */
@@ -1899,9 +1897,7 @@ static int __init sba_driver_callback(struct parisc_device *dev)
int i;
char *version;
void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *root;
-#endif
+ struct proc_dir_entry *root __maybe_unused;
sba_dump_ranges(sba_addr);
@@ -1967,7 +1963,6 @@ static int __init sba_driver_callback(struct parisc_device *dev)
hppa_dma_ops = &sba_ops;
-#ifdef CONFIG_PROC_FS
switch (dev->id.hversion) {
case PLUTO_MCKINLEY_PORT:
if (!proc_mckinley_root)
@@ -1985,7 +1980,6 @@ static int __init sba_driver_callback(struct parisc_device *dev)
proc_create_single("sba_iommu", 0, root, sba_proc_info);
proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
-#endif
return 0;
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 49bd09c7dd0a..e9ae66cc4189 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -196,7 +196,7 @@ config PCI_HYPERV
config PCI_DYNAMIC_OF_NODES
bool "Create Device tree nodes for PCI devices"
- depends on OF
+ depends on OF_IRQ
select OF_DYNAMIC
help
This option enables support for generating device tree nodes for some
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index e2f29404c84e..64420ecc24d1 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -43,7 +43,6 @@
#define PARF_PHY_REFCLK 0x4c
#define PARF_CONFIG_BITS 0x50
#define PARF_DBI_BASE_ADDR 0x168
-#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3 0x16c /* Register offset specific to IP ver 2.3.3 */
#define PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
@@ -797,8 +796,7 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 2af64bcb7da3..51e3dd0ea5ab 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -657,30 +657,33 @@ void of_pci_make_dev_node(struct pci_dev *pdev)
cset = kmalloc(sizeof(*cset), GFP_KERNEL);
if (!cset)
- goto failed;
+ goto out_free_name;
of_changeset_init(cset);
np = of_changeset_create_node(cset, ppnode, name);
if (!np)
- goto failed;
- np->data = cset;
+ goto out_destroy_cset;
ret = of_pci_add_properties(pdev, cset, np);
if (ret)
- goto failed;
+ goto out_free_node;
ret = of_changeset_apply(cset);
if (ret)
- goto failed;
+ goto out_free_node;
+ np->data = cset;
pdev->dev.of_node = np;
kfree(name);
return;
-failed:
- if (np)
- of_node_put(np);
+out_free_node:
+ of_node_put(np);
+out_destroy_cset:
+ of_changeset_destroy(cset);
+ kfree(cset);
+out_free_name:
kfree(name);
}
#endif
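The reworked error path in of_pci_make_dev_node() follows the usual acquire-in-order, unwind-in-reverse goto ladder, and it defers the np->data = cset assignment until the changeset has actually been applied. A generic, self-contained sketch of the pattern, with toy resources standing in for the name buffer, changeset and node:

#include <stdio.h>
#include <stdlib.h>

static int step_fails;          /* which acquisition step should fail (0 = none) */

static void *acquire(int step)
{
        if (step == step_fails)
                return NULL;
        return malloc(16);
}

static int setup_everything(void)
{
        void *name, *cset, *node;
        int ret = -1;

        name = acquire(1);
        if (!name)
                return ret;

        cset = acquire(2);
        if (!cset)
                goto out_free_name;

        node = acquire(3);
        if (!node)
                goto out_free_cset;

        /* ... apply the changeset here; on failure, free node then fall
         * through the same ladder.  The toy releases everything on success,
         * where the real function would keep the resources alive. */
        free(node);
        free(cset);
        free(name);
        return 0;

out_free_cset:
        free(cset);             /* undo step 2 */
out_free_name:
        free(name);             /* undo step 1 */
        return ret;
}

int main(void)
{
        step_fails = 3;
        printf("setup returned %d\n", setup_everything());
        return 0;
}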
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index 710ec35ba4a1..c2c7334152bc 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -186,8 +186,8 @@ static int of_pci_prop_interrupts(struct pci_dev *pdev,
static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
struct device_node *np)
{
+ u32 i, addr_sz[OF_PCI_MAX_INT_PIN] = { 0 }, map_sz = 0;
struct of_phandle_args out_irq[OF_PCI_MAX_INT_PIN];
- u32 i, addr_sz[OF_PCI_MAX_INT_PIN], map_sz = 0;
__be32 laddr[OF_PCI_ADDRESS_CELLS] = { 0 };
u32 int_map_mask[] = { 0xffff00, 0, 0, 7 };
struct device_node *pnode;
@@ -213,33 +213,44 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
out_irq[i].args[0] = pin;
ret = of_irq_parse_raw(laddr, &out_irq[i]);
if (ret) {
- pci_err(pdev, "parse irq %d failed, ret %d", pin, ret);
+ out_irq[i].np = NULL;
+ pci_dbg(pdev, "parse irq %d failed, ret %d", pin, ret);
continue;
}
- ret = of_property_read_u32(out_irq[i].np, "#address-cells",
- &addr_sz[i]);
- if (ret)
- addr_sz[i] = 0;
+ of_property_read_u32(out_irq[i].np, "#address-cells",
+ &addr_sz[i]);
}
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) {
i = pci_swizzle_interrupt_pin(child, pin) - 1;
+ if (!out_irq[i].np)
+ continue;
map_sz += 5 + addr_sz[i] + out_irq[i].args_count;
}
}
+ /*
+ * Interrupt parsing failed for all pins, so there is no need to
+ * generate an interrupt-map property.
+ */
+ if (!map_sz)
+ return 0;
+
int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL);
mapp = int_map;
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) {
+ i = pci_swizzle_interrupt_pin(child, pin) - 1;
+ if (!out_irq[i].np)
+ continue;
+
*mapp = (child->bus->number << 16) |
(child->devfn << 8);
mapp += OF_PCI_ADDRESS_CELLS;
*mapp = pin;
mapp++;
- i = pci_swizzle_interrupt_pin(child, pin) - 1;
*mapp = out_irq[i].np->phandle;
mapp++;
if (addr_sz[i]) {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a79c110c7e51..51ec9e7e784f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -572,7 +572,19 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
{
- pci_bridge_wait_for_secondary_bus(pci_dev, "resume");
+ int ret;
+
+ ret = pci_bridge_wait_for_secondary_bus(pci_dev, "resume");
+ if (ret) {
+ /*
+ * The downstream link failed to come up, so mark the
+ * devices below as disconnected to make sure we don't
+ * attempt to resume them.
+ */
+ pci_walk_bus(pci_dev->subordinate, pci_dev_set_disconnected,
+ NULL);
+ return;
+ }
/*
* When powering on a bridge from D3cold, the whole hierarchy may be
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index e85ff946e8c8..9c8fd69ae5ad 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -229,6 +229,7 @@ int pcie_aer_is_native(struct pci_dev *dev)
return pcie_ports_native || host->native_aer;
}
+EXPORT_SYMBOL_NS_GPL(pcie_aer_is_native, CXL);
static int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 58a2b1a1cae4..1f3803bde7ee 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -29,10 +29,8 @@ extern bool pcie_ports_dpc_native;
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
-int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pcie_aer_init(void) { return 0; }
-static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
#ifdef CONFIG_HOTPLUG_PCI_PCIE
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ab2a4a3a4c06..795534589b98 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -997,6 +997,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
res = window->res;
if (!res->flags && !res->start && !res->end) {
release_resource(res);
+ resource_list_destroy_entry(window);
continue;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5de09d2eb014..eeec1d6f9023 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3726,7 +3726,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
*/
static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
{
- if ((dev->device & 0xffc0) == 0x2340 || dev->device == 0x1eb8)
+ if ((dev->device & 0xffc0) == 0x2340)
quirk_no_bus_reset(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 913dc04b3a40..6b50bc551984 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1972,7 +1972,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
u64 delta;
int i;
- for (i = 0; i < CMN_DTM_NUM_COUNTERS; i++) {
+ for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
if (status & (1U << i)) {
ret = IRQ_HANDLED;
if (WARN_ON(!dtc->counters[i]))
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index e5a2ac4155f6..8fcaa26f0f8a 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -749,6 +749,8 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+
+ kvm_vcpu_pmu_resync_el0();
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 0a8f597e695b..365d964b0f6a 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -25,7 +25,7 @@
#include "../cxl/pmu.h"
#define CXL_PMU_CAP_REG 0x0
-#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(4, 0)
+#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(5, 0)
#define CXL_PMU_CAP_COUNTER_WIDTH_MSK GENMASK_ULL(15, 8)
#define CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK GENMASK_ULL(24, 20)
#define CXL_PMU_CAP_FILTERS_SUP_MSK GENMASK_ULL(39, 32)
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index aac670b90589..d1670bbe6d6b 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -93,6 +93,7 @@ source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
+source "drivers/phy/starfive/Kconfig"
source "drivers/phy/sunplus/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index ba7c100b14fc..868a220ed0f6 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -32,6 +32,7 @@ obj-y += allwinner/ \
samsung/ \
socionext/ \
st/ \
+ starfive/ \
sunplus/ \
tegra/ \
ti/ \
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 56d53f78d002..ec551464dd4f 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -23,8 +23,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-sun4i-usb.h>
diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
index 84055b720016..363f9a0df503 100644
--- a/drivers/phy/allwinner/phy-sun50i-usb3.c
+++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
index 6e9af79e152c..08a86962d949 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
@@ -13,8 +13,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
index a3e1108b736d..ae898f93f97b 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/phy/phy.h>
diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c
index 2299bab38e05..60be5cdc600b 100644
--- a/drivers/phy/amlogic/phy-meson-axg-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2020 Remi Pommarel <repk@triplefau.lt>
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
index cabdddbbabfd..46e5f7e7eb6c 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/phy/phy.h>
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
index ec2555bb83d5..0e0b5c00b676 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
@@ -319,7 +319,7 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- priv->soc_id = (enum meson_soc_id)of_device_get_match_data(&pdev->dev);
+ priv->soc_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
priv->regmap = devm_regmap_init_mmio(dev, base,
&phy_meson_g12a_usb2_regmap_conf);
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index d2a1da8d9e58..2712c4bd549d 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -11,7 +11,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
index db17c3448bfe..14ea89927ab1 100644
--- a/drivers/phy/amlogic/phy-meson-gxl-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
@@ -8,8 +8,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
diff --git a/drivers/phy/amlogic/phy-meson8-hdmi-tx.c b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
index f9a6572c27d8..2617f7f6c2ec 100644
--- a/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
+++ b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
@@ -10,7 +10,7 @@
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c
index dd96763911b8..d63147c41b8c 100644
--- a/drivers/phy/amlogic/phy-meson8b-usb2.c
+++ b/drivers/phy/amlogic/phy-meson8b-usb2.c
@@ -8,8 +8,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index bbfad209c890..69584b685edb 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -206,7 +206,7 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
of_id = of_match_device(bcm_ns_usb3_id_table, dev);
if (!of_id)
return -EINVAL;
- usb3->family = (enum bcm_ns_family)of_id->data;
+ usb3->family = (uintptr_t)of_id->data;
syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0);
err = of_address_to_resource(syscon_np, 0, &res);
diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c
index 0002da3b5b5d..b0bd18a5df87 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-usb.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c
@@ -311,7 +311,7 @@ static int bcm_usb_phy_probe(struct platform_device *pdev)
of_id = of_match_node(bcm_usb_phy_of_match, dn);
if (of_id)
- version = (enum bcm_usb_phy_version)of_id->data;
+ version = (uintptr_t)of_id->data;
else
return -ENODEV;
diff --git a/drivers/phy/broadcom/phy-bcm63xx-usbh.c b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
index 6c05ba8b08be..f8183dea774b 100644
--- a/drivers/phy/broadcom/phy-bcm63xx-usbh.c
+++ b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 769c707d9b71..ed9e18791ec9 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -772,7 +772,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
of_id = of_match_node(brcm_sata_phy_of_match, dn);
if (of_id)
- priv->version = (enum brcm_sata_phy_version)of_id->data;
+ priv->version = (uintptr_t)of_id->data;
else
priv->version = BRCM_SATA_PHY_STB_28NM;
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index a4cfb777dd83..a16f0b58eb74 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
diff --git a/drivers/phy/cadence/cdns-dphy-rx.c b/drivers/phy/cadence/cdns-dphy-rx.c
index c05b043893a9..7729cf80a9bd 100644
--- a/drivers/phy/cadence/cdns-dphy-rx.c
+++ b/drivers/phy/cadence/cdns-dphy-rx.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
index 6e58012b6488..dddb66de6dba 100644
--- a/drivers/phy/cadence/cdns-dphy.c
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -9,8 +9,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 7df9c79a772a..d4eb93ce8232 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -30,23 +30,34 @@
#define SIERRA_COMMON_CDB_OFFSET 0x0
#define SIERRA_MACRO_ID_REG 0x0
#define SIERRA_CMN_PLLLC_GEN_PREG 0x42
+#define SIERRA_CMN_PLLLC_FBDIV_INT_MODE0_PREG 0x43
+#define SIERRA_CMN_PLLLC_DCOCAL_CTRL_PREG 0x45
+#define SIERRA_CMN_PLLLC_INIT_PREG 0x46
+#define SIERRA_CMN_PLLLC_ITERTMR_PREG 0x47
#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
+#define SIERRA_CMN_PLLLC_LOCKSEARCH_PREG 0x4C
#define SIERRA_CMN_PLLLC_CLK1_PREG 0x4D
+#define SIERRA_CMN_PLLLC_CLK0_PREG 0x4E
#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51
#define SIERRA_CMN_PLLLC_SS_PREG 0x52
#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53
#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54
+#define SIERRA_CMN_PLLCSM_PLLEN_TMR_PREG 0x5D
+#define SIERRA_CMN_PLLCSM_PLLPRE_TMR_PREG 0x5E
#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63
+#define SIERRA_SDOSCCAL_CLK_CNT_PREG 0x6E
#define SIERRA_CMN_REFRCV_PREG 0x98
+#define SIERRA_CMN_RESCAL_CTRLA_PREG 0xA0
#define SIERRA_CMN_REFRCV1_PREG 0xB8
#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
#define SIERRA_CMN_PLLLC1_FBDIV_INT_PREG 0xC3
+#define SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG 0xC5
#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
#define SIERRA_CMN_PLLLC1_CLK0_PREG 0xCE
#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
@@ -86,6 +97,7 @@
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
+#define SIERRA_LANE_TX_RECEIVER_DETECT_PREG 0x071
#define SIERRA_TX_RCVDET_OVRD_PREG 0x072
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
@@ -101,6 +113,8 @@
#define SIERRA_CREQ_SPARE_PREG 0x096
#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG 0x097
#define SIERRA_CTLELUT_CTRL_PREG 0x098
+#define SIERRA_DEQ_BLK_TAU_CTRL1_PREG 0x0AC
+#define SIERRA_DEQ_BLK_TAU_CTRL4_PREG 0x0AF
#define SIERRA_DFE_ECMP_RATESEL_PREG 0x0C0
#define SIERRA_DFE_SMP_RATESEL_PREG 0x0C1
#define SIERRA_DEQ_PHALIGN_CTRL 0x0C4
@@ -129,6 +143,9 @@
#define SIERRA_DEQ_GLUT14 0x0F6
#define SIERRA_DEQ_GLUT15 0x0F7
#define SIERRA_DEQ_GLUT16 0x0F8
+#define SIERRA_POSTPRECUR_EN_CEPH_CTRL_PREG 0x0F9
+#define SIERRA_TAU_EN_CEPH2TO0_PREG 0x0FB
+#define SIERRA_TAU_EN_CEPH5TO3_PREG 0x0FC
#define SIERRA_DEQ_ALUT0 0x108
#define SIERRA_DEQ_ALUT1 0x109
#define SIERRA_DEQ_ALUT2 0x10A
@@ -143,6 +160,7 @@
#define SIERRA_DEQ_ALUT11 0x113
#define SIERRA_DEQ_ALUT12 0x114
#define SIERRA_DEQ_ALUT13 0x115
+#define SIERRA_OEPH_EN_CTRL_PREG 0x124
#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
#define SIERRA_DEQ_DFETAP0 0x129
#define SIERRA_DEQ_DFETAP1 0x12B
@@ -157,6 +175,7 @@
#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
+#define SIERRA_DEQ_CONCUR_EPIOFFSET_MODE_PREG 0x159
#define SIERRA_DEQ_PICTRL_PREG 0x161
#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
@@ -165,6 +184,7 @@
#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E
#define SIERRA_CPI_TRIM_PREG 0x17F
#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG 0x184
#define SIERRA_EPI_CTRL_PREG 0x187
#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
#define SIERRA_LFPSFILT_NS_PREG 0x18A
@@ -176,6 +196,7 @@
#define SIERRA_RXBUFFER_CTLECTRL_PREG 0x19E
#define SIERRA_RXBUFFER_RCDFECTRL_PREG 0x19F
#define SIERRA_RXBUFFER_DFECTRL_PREG 0x1A0
+#define SIERRA_LN_SPARE_REG_PREG 0x1B0
#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
@@ -2402,6 +2423,77 @@ static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
.num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
};
+/* SGMII PHY common configuration */
+static const struct cdns_reg_pairs sgmii_pma_cmn_vals[] = {
+ {0x0180, SIERRA_SDOSCCAL_CLK_CNT_PREG},
+ {0x6000, SIERRA_CMN_REFRCV_PREG},
+ {0x0031, SIERRA_CMN_RESCAL_CTRLA_PREG},
+ {0x001C, SIERRA_CMN_PLLLC_FBDIV_INT_MODE0_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_LOCKSEARCH_PREG},
+ {0x8103, SIERRA_CMN_PLLLC_CLK0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0027, SIERRA_CMN_PLLCSM_PLLEN_TMR_PREG},
+ {0x0062, SIERRA_CMN_PLLCSM_PLLPRE_TMR_PREG},
+ {0x0800, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_INIT_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_ITERTMR_PREG},
+ {0x0020, SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG},
+ {0x0013, SIERRA_CMN_PLLLC_DCOCAL_CTRL_PREG},
+ {0x0013, SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG},
+};
+
+static struct cdns_sierra_vals sgmii_cmn_vals = {
+ .reg_pairs = sgmii_pma_cmn_vals,
+ .num_regs = ARRAY_SIZE(sgmii_pma_cmn_vals),
+};
+
+/* SGMII PHY lane configuration */
+static const struct cdns_reg_pairs sgmii_ln_regs[] = {
+ {0x691E, SIERRA_DET_STANDEC_D_PREG},
+ {0x0FFE, SIERRA_PSC_RX_A0_PREG},
+ {0x0104, SIERRA_PLLCTRL_FBDIV_MODE01_PREG},
+ {0x0013, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0106, SIERRA_PLLCTRL_GEN_D_PREG},
+ {0x5234, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x00AB, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x3C0E, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x3220, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x6320, SIERRA_DEQ_CONCUR_EPIOFFSET_MODE_PREG},
+ {0x0000, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x15A2, SIERRA_LN_SPARE_REG_PREG},
+ {0x7900, SIERRA_DEQ_BLK_TAU_CTRL1_PREG},
+ {0x2202, SIERRA_DEQ_BLK_TAU_CTRL4_PREG},
+ {0x2206, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0005, SIERRA_LANE_TX_RECEIVER_DETECT_PREG},
+ {0x8001, SIERRA_CREQ_SPARE_PREG},
+ {0x0000, SIERRA_DEQ_CONCUR_CTRL1_PREG},
+ {0xD004, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x0101, SIERRA_DEQ_GLUT9},
+ {0x0101, SIERRA_DEQ_GLUT10},
+ {0x0101, SIERRA_DEQ_GLUT11},
+ {0x0101, SIERRA_DEQ_GLUT12},
+ {0x0000, SIERRA_DEQ_GLUT13},
+ {0x0000, SIERRA_DEQ_GLUT16},
+ {0x0000, SIERRA_POSTPRECUR_EN_CEPH_CTRL_PREG},
+ {0x0000, SIERRA_TAU_EN_CEPH2TO0_PREG},
+ {0x0003, SIERRA_TAU_EN_CEPH5TO3_PREG},
+ {0x0101, SIERRA_DEQ_ALUT8},
+ {0x0101, SIERRA_DEQ_ALUT9},
+ {0x0100, SIERRA_DEQ_ALUT10},
+ {0x0000, SIERRA_OEPH_EN_CTRL_PREG},
+ {0x5425, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x7458, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
+ {0x321F, SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG},
+};
+
+static struct cdns_sierra_vals sgmii_pma_ln_vals = {
+ .reg_pairs = sgmii_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_ln_regs),
+};
+
static const struct cdns_sierra_data cdns_map_sierra = {
.id_value = SIERRA_MACRO_ID,
.block_offset_shift = 0x2,
@@ -2449,6 +2541,9 @@ static const struct cdns_sierra_data cdns_map_sierra = {
},
},
[TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_cmn_vals,
+ },
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
[EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
@@ -2487,6 +2582,9 @@ static const struct cdns_sierra_data cdns_map_sierra = {
},
},
[TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_pma_ln_vals,
+ },
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
[EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 37b6b5c05be8..a75c96385c57 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -17,8 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -27,14 +25,11 @@
#define REF_CLK_19_2MHZ 19200000
#define REF_CLK_25MHZ 25000000
#define REF_CLK_100MHZ 100000000
+#define REF_CLK_156_25MHZ 156250000
#define MAX_NUM_LANES 4
#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
-#define NUM_SSC_MODE 3
-#define NUM_REF_CLK 3
-#define NUM_PHY_TYPE 6
-
#define POLL_TIMEOUT_US 5000
#define PLL_LOCK_TIMEOUT 100000
@@ -106,6 +101,7 @@
#define CMN_PLL0_HIGH_THR_M0 0x0093U
#define CMN_PLL0_DSM_DIAG_M0 0x0094U
#define CMN_PLL0_DSM_FBH_OVRD_M0 0x0095U
+#define CMN_PLL0_DSM_FBL_OVRD_M0 0x0096U
#define CMN_PLL0_SS_CTRL1_M0 0x0098U
#define CMN_PLL0_SS_CTRL2_M0 0x0099U
#define CMN_PLL0_SS_CTRL3_M0 0x009AU
@@ -196,6 +192,10 @@
#define RX_PSC_A2 0x0002U
#define RX_PSC_A3 0x0003U
#define RX_PSC_CAL 0x0006U
+#define RX_SDCAL0_INIT_TMR 0x0044U
+#define RX_SDCAL0_ITER_TMR 0x0045U
+#define RX_SDCAL1_INIT_TMR 0x004CU
+#define RX_SDCAL1_ITER_TMR 0x004DU
#define RX_CDRLF_CNFG 0x0080U
#define RX_CDRLF_CNFG3 0x0082U
#define RX_SIGDET_HL_FILT_TMR 0x0090U
@@ -294,20 +294,49 @@ enum cdns_torrent_phy_type {
TYPE_SGMII,
TYPE_QSGMII,
TYPE_USB,
+ TYPE_USXGMII,
};
enum cdns_torrent_ref_clk {
CLK_19_2_MHZ,
CLK_25_MHZ,
- CLK_100_MHZ
+ CLK_100_MHZ,
+ CLK_156_25_MHZ,
+ CLK_ANY,
};
enum cdns_torrent_ssc_mode {
NO_SSC,
EXTERNAL_SSC,
- INTERNAL_SSC
+ INTERNAL_SSC,
+ ANY_SSC,
};
+/* Unique key id for vals table entry
+ * REFCLK0_RATE | REFCLK1_RATE | LINK0_TYPE | LINK1_TYPE | SSC_TYPE
+ */
+#define REFCLK0_SHIFT 12
+#define REFCLK0_MASK GENMASK(14, 12)
+#define REFCLK1_SHIFT 9
+#define REFCLK1_MASK GENMASK(11, 9)
+#define LINK0_SHIFT 6
+#define LINK0_MASK GENMASK(8, 6)
+#define LINK1_SHIFT 3
+#define LINK1_MASK GENMASK(5, 3)
+#define SSC_SHIFT 0
+#define SSC_MASK GENMASK(2, 0)
+
+#define CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc) \
+ ((((refclk0) << REFCLK0_SHIFT) & REFCLK0_MASK) | \
+ (((refclk1) << REFCLK1_SHIFT) & REFCLK1_MASK) | \
+ (((link0) << LINK0_SHIFT) & LINK0_MASK) | \
+ (((link1) << LINK1_SHIFT) & LINK1_MASK) | \
+ (((ssc) << SSC_SHIFT) & SSC_MASK))
+
+#define CDNS_TORRENT_KEY_ANYCLK(link0, link1) \
+ CDNS_TORRENT_KEY(CLK_ANY, CLK_ANY, \
+ (link0), (link1), ANY_SSC)
+
struct cdns_torrent_inst {
struct phy *phy;
u32 mlane;
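The Torrent rework replaces the sparse multi-dimensional register-value arrays with flat tables keyed by a packed 15-bit id, three bits each for the two reference clocks, the two link types and the SSC mode, composed by CDNS_TORRENT_KEY(). A small self-contained model of that keyed lookup; the numeric ids and table entries below are arbitrary stand-ins, not the driver's enum values.

#include <stdint.h>
#include <stdio.h>

/* 3-bit fields packed like CDNS_TORRENT_KEY(): refclk0 | refclk1 |
 * link0 | link1 | ssc, most significant first. */
#define KEY(refclk0, refclk1, link0, link1, ssc) \
        (((refclk0) << 12) | ((refclk1) << 9) | ((link0) << 6) | \
         ((link1) << 3) | (ssc))

struct vals_entry {
        uint32_t key;
        const char *vals;       /* would be a struct cdns_torrent_vals * */
};

/* Sparse table: only the combinations that actually need overrides. */
static const struct vals_entry table[] = {
        { KEY(3, 3, 7, 0, 0), "sl_usxgmii_156_25_no_ssc" },
        { KEY(2, 2, 1, 0, 0), "sl_pcie_100_no_ssc" },
};

static const char *lookup(uint32_t key)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].key == key)
                        return table[i].vals;
        return NULL;
}

int main(void)
{
        printf("%s\n", lookup(KEY(3, 3, 7, 0, 0)));
        return 0;
}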
@@ -394,21 +423,26 @@ struct cdns_torrent_vals {
u32 num_regs;
};
+struct cdns_torrent_vals_entry {
+ u32 key;
+ struct cdns_torrent_vals *vals;
+};
+
+struct cdns_torrent_vals_table {
+ struct cdns_torrent_vals_entry *entries;
+ u32 num_entries;
+};
+
struct cdns_torrent_data {
u8 block_offset_shift;
u8 reg_offset_shift;
- struct cdns_torrent_vals *link_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *xcvr_diag_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *cmn_vals[NUM_REF_CLK][NUM_PHY_TYPE]
- [NUM_PHY_TYPE][NUM_SSC_MODE];
- struct cdns_torrent_vals *tx_ln_vals[NUM_REF_CLK][NUM_PHY_TYPE]
- [NUM_PHY_TYPE][NUM_SSC_MODE];
- struct cdns_torrent_vals *rx_ln_vals[NUM_REF_CLK][NUM_PHY_TYPE]
- [NUM_PHY_TYPE][NUM_SSC_MODE];
+ struct cdns_torrent_vals_table link_cmn_vals_tbl;
+ struct cdns_torrent_vals_table xcvr_diag_vals_tbl;
+ struct cdns_torrent_vals_table pcs_cmn_vals_tbl;
+ struct cdns_torrent_vals_table phy_pma_cmn_vals_tbl;
+ struct cdns_torrent_vals_table cmn_vals_tbl;
+ struct cdns_torrent_vals_table tx_ln_vals_tbl;
+ struct cdns_torrent_vals_table rx_ln_vals_tbl;
};
struct cdns_regmap_cdb_context {
@@ -417,6 +451,24 @@ struct cdns_regmap_cdb_context {
u8 reg_offset_shift;
};
+static struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl,
+ enum cdns_torrent_ref_clk refclk0,
+ enum cdns_torrent_ref_clk refclk1,
+ enum cdns_torrent_phy_type link0,
+ enum cdns_torrent_phy_type link1,
+ enum cdns_torrent_ssc_mode ssc)
+{
+ int i;
+ u32 key = CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc);
+
+ for (i = 0; i < tbl->num_entries; i++) {
+ if (tbl->entries[i].key == key)
+ return tbl->entries[i].vals;
+ }
+
+ return NULL;
+}
+
static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
{
struct cdns_regmap_cdb_context *ctx = context;
@@ -644,6 +696,8 @@ static const char *cdns_torrent_get_phy_type(enum cdns_torrent_phy_type phy_type
return "QSGMII";
case TYPE_USB:
return "USB";
+ case TYPE_USXGMII:
+ return "USXGMII";
default:
return "None";
}
@@ -2244,6 +2298,7 @@ static int cdns_torrent_phy_init(struct phy *phy)
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
enum cdns_torrent_phy_type phy_type = inst->phy_type;
enum cdns_torrent_ssc_mode ssc = inst->ssc_mode;
+ struct cdns_torrent_vals *phy_pma_cmn_vals;
struct cdns_torrent_vals *pcs_cmn_vals;
struct cdns_reg_pairs *reg_pairs;
struct regmap *regmap;
@@ -2258,13 +2313,16 @@ static int cdns_torrent_phy_init(struct phy *phy)
/**
* Spread spectrum generation is not required or supported
- * for SGMII/QSGMII
+ * for SGMII/QSGMII/USXGMII
*/
- if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII)
+ if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII || phy_type == TYPE_USXGMII)
ssc = NO_SSC;
/* PHY configuration specific registers for single link */
- link_cmn_vals = init_data->link_cmn_vals[phy_type][TYPE_NONE][ssc];
+ link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
if (link_cmn_vals) {
reg_pairs = link_cmn_vals->reg_pairs;
num_regs = link_cmn_vals->num_regs;
@@ -2281,7 +2339,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
reg_pairs[i].val);
}
- xcvr_diag_vals = init_data->xcvr_diag_vals[phy_type][TYPE_NONE][ssc];
+ xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
if (xcvr_diag_vals) {
reg_pairs = xcvr_diag_vals->reg_pairs;
num_regs = xcvr_diag_vals->num_regs;
@@ -2294,7 +2355,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
/* PHY PCS common registers configurations */
- pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
@@ -2304,8 +2368,25 @@ static int cdns_torrent_phy_init(struct phy *phy)
reg_pairs[i].val);
}
+ /* PHY PMA common registers configurations */
+ phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
+ if (phy_pma_cmn_vals) {
+ reg_pairs = phy_pma_cmn_vals->reg_pairs;
+ num_regs = phy_pma_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
/* PMA common registers configurations */
- cmn_vals = init_data->cmn_vals[ref_clk][phy_type][TYPE_NONE][ssc];
+ cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
+ ref_clk, ref_clk,
+ phy_type, TYPE_NONE,
+ ssc);
if (cmn_vals) {
reg_pairs = cmn_vals->reg_pairs;
num_regs = cmn_vals->num_regs;
@@ -2316,7 +2397,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
/* PMA TX lane registers configurations */
- tx_ln_vals = init_data->tx_ln_vals[ref_clk][phy_type][TYPE_NONE][ssc];
+ tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_type, TYPE_NONE,
+ ssc);
if (tx_ln_vals) {
reg_pairs = tx_ln_vals->reg_pairs;
num_regs = tx_ln_vals->num_regs;
@@ -2329,7 +2413,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
/* PMA RX lane registers configurations */
- rx_ln_vals = init_data->rx_ln_vals[ref_clk][phy_type][TYPE_NONE][ssc];
+ rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_type, TYPE_NONE,
+ ssc);
if (rx_ln_vals) {
reg_pairs = rx_ln_vals->reg_pairs;
num_regs = rx_ln_vals->num_regs;
@@ -2418,7 +2505,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
* being configured, but these can be different for particular
* PHY type and are per lane.
*/
- link_cmn_vals = init_data->link_cmn_vals[phy_t1][phy_t2][ssc];
+ link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
if (link_cmn_vals) {
reg_pairs = link_cmn_vals->reg_pairs;
num_regs = link_cmn_vals->num_regs;
@@ -2436,7 +2525,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
reg_pairs[i].val);
}
- xcvr_diag_vals = init_data->xcvr_diag_vals[phy_t1][phy_t2][ssc];
+ xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
if (xcvr_diag_vals) {
reg_pairs = xcvr_diag_vals->reg_pairs;
num_regs = xcvr_diag_vals->num_regs;
@@ -2449,7 +2540,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PHY PCS common registers configurations */
- pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
@@ -2460,7 +2553,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PMA common registers configurations */
- cmn_vals = init_data->cmn_vals[ref_clk][phy_t1][phy_t2][ssc];
+ cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
+ ref_clk, ref_clk,
+ phy_t1, phy_t2, ssc);
if (cmn_vals) {
reg_pairs = cmn_vals->reg_pairs;
num_regs = cmn_vals->num_regs;
@@ -2471,7 +2566,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PMA TX lane registers configurations */
- tx_ln_vals = init_data->tx_ln_vals[ref_clk][phy_t1][phy_t2][ssc];
+ tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_t1, phy_t2, ssc);
if (tx_ln_vals) {
reg_pairs = tx_ln_vals->reg_pairs;
num_regs = tx_ln_vals->num_regs;
@@ -2484,7 +2581,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PMA RX lane registers configurations */
- rx_ln_vals = init_data->rx_ln_vals[ref_clk][phy_t1][phy_t2][ssc];
+ rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_t1, phy_t2, ssc);
if (rx_ln_vals) {
reg_pairs = rx_ln_vals->reg_pairs;
num_regs = rx_ln_vals->num_regs;
@@ -2617,6 +2716,9 @@ static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
case REF_CLK_100MHZ:
cdns_phy->ref_clk_rate = CLK_100_MHZ;
break;
+ case REF_CLK_156_25MHZ:
+ cdns_phy->ref_clk_rate = CLK_156_25_MHZ;
+ break;
default:
dev_err(cdns_phy->dev, "Invalid Ref Clock Rate\n");
clk_disable_unprepare(cdns_phy->clk);
@@ -2736,6 +2838,9 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
case PHY_TYPE_USB3:
cdns_phy->phys[node].phy_type = TYPE_USB;
break;
+ case PHY_TYPE_USXGMII:
+ cdns_phy->phys[node].phy_type = TYPE_USXGMII;
+ break;
default:
dev_err(dev, "Unsupported protocol\n");
ret = -EINVAL;
@@ -2929,6 +3034,123 @@ static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
.num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs),
};
+/* TI USXGMII configuration: Enable cmn_refclk_rcv_out_en */
+static struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = {
+ {0x0040, PHY_PMA_CMN_CTRL1},
+};
+
+static struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = {
+ .reg_pairs = ti_usxgmii_phy_pma_cmn_regs,
+ .num_regs = ARRAY_SIZE(ti_usxgmii_phy_pma_cmn_regs),
+};
+
+/* Single USXGMII link configuration */
+static struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = {
+ {0x0000, PHY_PLL_CFG},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0001, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = {
+ .reg_pairs = sl_usxgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usxgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = sl_usxgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_usxgmii_xcvr_diag_ln_regs),
+};
+
+/* Single link USXGMII, 156.25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
+ {0x0014, CMN_SSM_BIAS_TMR},
+ {0x0028, CMN_PLLSM0_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
+ {0x0028, CMN_PLLSM1_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM1_PLLLOCK_TMR},
+ {0x0062, CMN_BGCAL_INIT_TMR},
+ {0x0062, CMN_BGCAL_ITER_TMR},
+ {0x0014, CMN_IBCAL_INIT_TMR},
+ {0x0018, CMN_TXPUCAL_INIT_TMR},
+ {0x0005, CMN_TXPUCAL_ITER_TMR},
+ {0x0018, CMN_TXPDCAL_INIT_TMR},
+ {0x0005, CMN_TXPDCAL_ITER_TMR},
+ {0x024A, CMN_RXCAL_INIT_TMR},
+ {0x0005, CMN_RXCAL_ITER_TMR},
+ {0x000B, CMN_SD_CAL_REFTIM_START},
+ {0x0132, CMN_SD_CAL_PLLCNT_START},
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
+ {0x0014, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
+ {0x0005, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x061B, CMN_PLL1_VCOCAL_INIT_TMR},
+ {0x0019, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x0019, CMN_PLL1_VCOCAL_ITER_TMR},
+ {0x1354, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x1354, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x0138, CMN_PLL0_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL1_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x0138, CMN_PLL1_LOCK_PLLCNT_START}
+};
+
+static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
+ {0x07A2, TX_RCVDET_ST_TMR},
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0000, XCVR_DIAG_PSC_OVRD}
+};
+
+static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
+ {0x0014, RX_SDCAL0_INIT_TMR},
+ {0x0062, RX_SDCAL0_ITER_TMR},
+ {0x0014, RX_SDCAL1_INIT_TMR},
+ {0x0062, RX_SDCAL1_ITER_TMR},
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x0030, RX_REE_SMGM_CTRL1},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x00B9, RX_DIAG_NQST_CTRL},
+ {0x0C21, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0002, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0033, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG}
+};
+
+static struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = {
+ .reg_pairs = sl_usxgmii_156_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usxgmii_156_25_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = {
+ .reg_pairs = usxgmii_156_25_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = {
+ .reg_pairs = usxgmii_156_25_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_rx_ln_regs),
+};
+
/* PCIe and DP link configuration */
static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
@@ -3934,1093 +4156,401 @@ static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
.num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs),
};
+static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &pcie_dp_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &pcie_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_link_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &dp_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_xcvr_diag_ln_vals},
+};
+
+static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &sl_usxgmii_156_25_no_ssc_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+};
+
+static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+};
+
static const struct cdns_torrent_data cdns_map_torrent = {
.block_offset_shift = 0x2,
.reg_offset_shift = 0x2,
- .link_cmn_vals = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_dp_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_dp_link_cmn_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_dp_link_cmn_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_dp_link_cmn_vals,
- },
- },
+ .link_cmn_vals_tbl = {
+ .entries = link_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(link_cmn_vals_entries),
},
- .xcvr_diag_vals = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_dp_xcvr_diag_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_dp_xcvr_diag_ln_vals,
- },
- },
+ .xcvr_diag_vals_tbl = {
+ .entries = xcvr_diag_vals_entries,
+ .num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
},
- .pcs_cmn_vals = {
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- },
- },
+ .pcs_cmn_vals_tbl = {
+ .entries = pcs_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
},
- .cmn_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_cmn_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_cmn_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- },
- },
- },
+ .cmn_vals_tbl = {
+ .entries = cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(cmn_vals_entries),
},
- .tx_ln_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_tx_ln_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_tx_ln_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_USB] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_DP] = {
- [NO_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- },
- },
+ .tx_ln_vals_tbl = {
+ .entries = cdns_tx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(cdns_tx_ln_vals_entries),
},
- .rx_ln_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_rx_ln_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_rx_ln_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- },
- },
+ .rx_ln_vals_tbl = {
+ .entries = cdns_rx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(cdns_rx_ln_vals_entries),
},
};
+static struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &ti_usxgmii_phy_pma_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+};
+
static const struct cdns_torrent_data ti_j721e_map_torrent = {
.block_offset_shift = 0x0,
.reg_offset_shift = 0x1,
- .link_cmn_vals = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_dp_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_dp_link_cmn_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_dp_link_cmn_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_dp_link_cmn_vals,
- },
- },
+ .link_cmn_vals_tbl = {
+ .entries = link_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(link_cmn_vals_entries),
+ },
+ .xcvr_diag_vals_tbl = {
+ .entries = xcvr_diag_vals_entries,
+ .num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
},
- .xcvr_diag_vals = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_dp_xcvr_diag_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_dp_xcvr_diag_ln_vals,
- },
- },
+ .pcs_cmn_vals_tbl = {
+ .entries = pcs_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
},
- .pcs_cmn_vals = {
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- },
- },
+ .phy_pma_cmn_vals_tbl = {
+ .entries = j721e_phy_pma_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(j721e_phy_pma_cmn_vals_entries),
},
- .cmn_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_cmn_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_cmn_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- },
- },
- },
+ .cmn_vals_tbl = {
+ .entries = cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(cmn_vals_entries),
},
- .tx_ln_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_tx_ln_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_tx_ln_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_USB] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_DP] = {
- [NO_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- },
- },
+ .tx_ln_vals_tbl = {
+ .entries = ti_tx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(ti_tx_ln_vals_entries),
},
- .rx_ln_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_rx_ln_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_rx_ln_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- },
- },
+ .rx_ln_vals_tbl = {
+ .entries = cdns_rx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(cdns_rx_ln_vals_entries),
},
};
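
For readers following the table conversion above: the per-combination pointers move out of a sparse multi-dimensional array into a flat list of keyed entries plus a count, searched at lookup time. A rough sketch of that shape, with illustrative identifiers that are not the driver's actual ones:

#include <linux/stddef.h>

/* Illustrative sketch only: names and key fields are assumptions. */
struct ln_vals_entry {
	unsigned int refclk;		/* e.g. CLK_100_MHZ */
	unsigned int type1;		/* primary protocol, e.g. TYPE_USB */
	unsigned int type2;		/* companion protocol, e.g. TYPE_NONE */
	unsigned int ssc;		/* NO_SSC / EXTERNAL_SSC / INTERNAL_SSC */
	const void *vals;		/* points at an existing *_ln_vals table */
};

struct ln_vals_table {
	const struct ln_vals_entry *entries;
	unsigned int num_entries;
};

static const void *ln_vals_lookup(const struct ln_vals_table *tbl,
				  unsigned int refclk, unsigned int type1,
				  unsigned int type2, unsigned int ssc)
{
	unsigned int i;

	for (i = 0; i < tbl->num_entries; i++) {
		const struct ln_vals_entry *e = &tbl->entries[i];

		if (e->refclk == refclk && e->type1 == type1 &&
		    e->type2 == type2 && e->ssc == ssc)
			return e->vals;
	}

	return NULL;	/* no settings for this combination */
}

Compared with the removed arrays, only combinations that actually have settings occupy storage, at the cost of a linear search when a lane is configured.
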
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index d4c92498ad1e..b700f52b7b67 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -11,7 +11,7 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index 88826ceb72f8..0b9a59d5b8f0 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -6,7 +6,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -394,7 +394,7 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
imx_phy->vbus = devm_regulator_get(dev, "vbus");
if (IS_ERR(imx_phy->vbus))
- return PTR_ERR(imx_phy->vbus);
+ return dev_err_probe(dev, PTR_ERR(imx_phy->vbus), "failed to get vbus\n");
phy_set_drvdata(imx_phy->phy, imx_phy);
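
The dev_err_probe() change above folds the error message and the return into one call and stays quiet on -EPROBE_DEFER. A generic sketch of the pattern, not this driver's code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

/* Generic sketch of the dev_err_probe() pattern used in the hunk above. */
static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regulator *vbus;

	vbus = devm_regulator_get(dev, "vbus");
	if (IS_ERR(vbus))
		/* logs the failure (silently for -EPROBE_DEFER) and returns it */
		return dev_err_probe(dev, PTR_ERR(vbus), "failed to get vbus\n");

	return 0;
}
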
diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
index 569f12af2aaf..4f036c77284e 100644
--- a/drivers/phy/freescale/phy-fsl-lynx-28g.c
+++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021-2022 NXP. */
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/hisilicon/phy-hi3660-usb3.c b/drivers/phy/hisilicon/phy-hi3660-usb3.c
index 84adce9b4277..e2a09d67faed 100644
--- a/drivers/phy/hisilicon/phy-hi3660-usb3.c
+++ b/drivers/phy/hisilicon/phy-hi3660-usb3.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/hisilicon/phy-hi3670-usb3.c b/drivers/phy/hisilicon/phy-hi3670-usb3.c
index b9ffe08abaab..40d3cf128b44 100644
--- a/drivers/phy/hisilicon/phy-hi3670-usb3.c
+++ b/drivers/phy/hisilicon/phy-hi3670-usb3.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/hisilicon/phy-hi6220-usb.c b/drivers/phy/hisilicon/phy-hi6220-usb.c
index e92ba78da4c8..97bd363dfe87 100644
--- a/drivers/phy/hisilicon/phy-hi6220-usb.c
+++ b/drivers/phy/hisilicon/phy-hi6220-usb.c
@@ -5,6 +5,7 @@
*/
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index 6ae6d509dfdd..c138cd4807d6 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -9,8 +9,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#define INNO_PHY_PORT_NUM 2
diff --git a/drivers/phy/hisilicon/phy-histb-combphy.c b/drivers/phy/hisilicon/phy-histb-combphy.c
index f1cb3e4d2add..c44588fd5a53 100644
--- a/drivers/phy/hisilicon/phy-histb-combphy.c
+++ b/drivers/phy/hisilicon/phy-histb-combphy.c
@@ -13,8 +13,9 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <dt-bindings/phy/phy.h>
diff --git a/drivers/phy/hisilicon/phy-hix5hd2-sata.c b/drivers/phy/hisilicon/phy-hix5hd2-sata.c
index b0f99a9ac857..1b26ddb4c8a7 100644
--- a/drivers/phy/hisilicon/phy-hix5hd2-sata.c
+++ b/drivers/phy/hisilicon/phy-hix5hd2-sata.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
index 28c28d816484..eb2721f72a4c 100644
--- a/drivers/phy/ingenic/phy-ingenic-usb.c
+++ b/drivers/phy/ingenic/phy-ingenic-usb.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
index 29d246ea24b4..82f1ffc0b0ad 100644
--- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
index 0fe408964334..b7d99861526a 100644
--- a/drivers/phy/marvell/phy-armada38x-comphy.c
+++ b/drivers/phy/marvell/phy-armada38x-comphy.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index d70ba9bc42d9..f972d78372ea 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/io.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-mmp3-hsic.c b/drivers/phy/marvell/phy-mmp3-hsic.c
index f2537fdcc3ab..271f1a2258ef 100644
--- a/drivers/phy/marvell/phy-mmp3-hsic.c
+++ b/drivers/phy/marvell/phy-mmp3-hsic.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-mmp3-usb.c b/drivers/phy/marvell/phy-mmp3-usb.c
index 04c0bada3519..5b71deb08851 100644
--- a/drivers/phy/marvell/phy-mmp3-usb.c
+++ b/drivers/phy/marvell/phy-mmp3-usb.c
@@ -6,6 +6,7 @@
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index d641b345afa3..24c3371e2bb2 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -19,6 +19,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
index 8834436bc9db..04f4fb4bed70 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -13,7 +13,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 34672e868a1e..b0dd13366598 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -11,6 +11,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -1011,8 +1012,7 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
"marvell,system-controller");
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
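
devm_platform_get_and_ioremap_resource(), used above, replaces the platform_get_resource()/devm_ioremap_resource() pair while still handing back the struct resource when the caller needs it. A minimal sketch with illustrative names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Minimal sketch of the combined helper; the probe body is illustrative. */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* looks up MEM resource 0, ioremaps it, and also returns 'res' */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* 'res' is only needed if the caller uses the resource itself */
	return 0;
}
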
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
index aa27c7994610..4922a5f3327d 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
@@ -12,7 +12,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/marvell/phy-mvebu-sata.c b/drivers/phy/marvell/phy-mvebu-sata.c
index 51a4646e2933..89a5a2b69d80 100644
--- a/drivers/phy/marvell/phy-mvebu-sata.c
+++ b/drivers/phy/marvell/phy-mvebu-sata.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
struct priv {
diff --git a/drivers/phy/marvell/phy-pxa-28nm-usb2.c b/drivers/phy/marvell/phy-pxa-28nm-usb2.c
index 1b2107f80f3a..64afb82cf70e 100644
--- a/drivers/phy/marvell/phy-pxa-28nm-usb2.c
+++ b/drivers/phy/marvell/phy-pxa-28nm-usb2.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
diff --git a/drivers/phy/marvell/phy-pxa-usb.c b/drivers/phy/marvell/phy-pxa-usb.c
index ffe889893ff4..6c98eb9608e9 100644
--- a/drivers/phy/marvell/phy-pxa-usb.c
+++ b/drivers/phy/marvell/phy-pxa-usb.c
@@ -296,7 +296,7 @@ static int pxa_usb_phy_probe(struct platform_device *pdev)
of_id = of_match_node(pxa_usb_phy_of_match, dev->of_node);
if (of_id)
- pxa_usb_phy->version = (enum pxa_usb_phy_version)of_id->data;
+ pxa_usb_phy->version = (uintptr_t)of_id->data;
else
pxa_usb_phy->version = PXA_USB_PHY_MMP2;
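
The (uintptr_t) cast above is the usual way to carry a small enum in the pointer-sized of_device_id .data field without pointer-size warnings. A sketch with a hypothetical enum and compatible string:

#include <linux/of.h>
#include <linux/types.h>

enum example_version { EXAMPLE_V1, EXAMPLE_V2 };	/* hypothetical versions */

static const struct of_device_id example_of_match[] = {
	/* the enum value is stored directly in the pointer-sized .data field */
	{ .compatible = "vendor,example-v2", .data = (void *)EXAMPLE_V2 },
	{ /* sentinel */ }
};

static enum example_version example_get_version(struct device_node *np)
{
	const struct of_device_id *of_id = of_match_node(example_of_match, np);

	/* cast back through uintptr_t so the pointer-to-integer step is explicit */
	return of_id ? (uintptr_t)of_id->data : EXAMPLE_V1;
}
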
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.h b/drivers/phy/mediatek/phy-mtk-hdmi.h
index fc2ad6a0527f..71c02d043485 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.h
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.h
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/types.h>
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index 526c05a4af5e..065ea626093a 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -36,7 +36,7 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
int ret;
/* Power up core and enable PLL */
- ret = clk_prepare_enable(mipi_tx->pll);
+ ret = clk_prepare_enable(mipi_tx->pll_hw.clk);
if (ret < 0)
return ret;
@@ -53,7 +53,7 @@ static int mtk_mipi_tx_power_off(struct phy *phy)
mipi_tx->driver_data->mipi_tx_disable_signal(phy);
/* Disable PLL and power down core */
- clk_disable_unprepare(mipi_tx->pll);
+ clk_disable_unprepare(mipi_tx->pll_hw.clk);
return 0;
}
@@ -158,9 +158,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
mipi_tx->pll_hw.init = &clk_init;
- mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
- if (IS_ERR(mipi_tx->pll))
- return dev_err_probe(dev, PTR_ERR(mipi_tx->pll), "Failed to register PLL\n");
+ ret = devm_clk_hw_register(dev, &mipi_tx->pll_hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register PLL\n");
phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops);
if (IS_ERR(phy))
@@ -176,29 +176,19 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
mtk_mipi_tx_get_calibration_datal(mipi_tx);
- return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
- mipi_tx->pll);
-}
-
-static void mtk_mipi_tx_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &mipi_tx->pll_hw);
}
static const struct of_device_id mtk_mipi_tx_match[] = {
- { .compatible = "mediatek,mt2701-mipi-tx",
- .data = &mt2701_mipitx_data },
- { .compatible = "mediatek,mt8173-mipi-tx",
- .data = &mt8173_mipitx_data },
- { .compatible = "mediatek,mt8183-mipi-tx",
- .data = &mt8183_mipitx_data },
- { },
+ { .compatible = "mediatek,mt2701-mipi-tx", .data = &mt2701_mipitx_data },
+ { .compatible = "mediatek,mt8173-mipi-tx", .data = &mt8173_mipitx_data },
+ { .compatible = "mediatek,mt8183-mipi-tx", .data = &mt8183_mipitx_data },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_mipi_tx_match);
static struct platform_driver mtk_mipi_tx_driver = {
.probe = mtk_mipi_tx_probe,
- .remove_new = mtk_mipi_tx_remove,
.driver = {
.name = "mediatek-mipi-tx",
.of_match_table = mtk_mipi_tx_match,
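
The clock rework above registers the clk_hw directly and lets devm-managed helpers expose it to DT, which is why the explicit remove callback can go away. A condensed sketch of that pattern, with illustrative names:

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

/* Condensed sketch of clk_hw-based registration; not this driver's code. */
static int example_register_pll(struct platform_device *pdev,
				struct clk_hw *pll_hw,
				const struct clk_ops *ops)
{
	struct device *dev = &pdev->dev;
	struct clk_init_data init = {
		.name = "example-pll",	/* illustrative clock name */
		.ops = ops,
	};
	int ret;

	pll_hw->init = &init;

	/* devm-managed: no explicit unregister needed on driver removal */
	ret = devm_clk_hw_register(dev, pll_hw);
	if (ret)
		return ret;

	/* expose the hw clock to DT consumers; also devm-managed */
	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, pll_hw);
}
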
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.h b/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
index 47b60b1a7226..5d4876f1dc95 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/slab.h>
@@ -32,7 +31,6 @@ struct mtk_mipi_tx {
u32 rt_code[5];
const struct mtk_mipitx_data *driver_data;
struct clk_hw pll_hw;
- struct clk *pll;
};
struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
diff --git a/drivers/phy/mediatek/phy-mtk-pcie.c b/drivers/phy/mediatek/phy-mtk-pcie.c
index 25dbd6e35722..a2f69d6c72f0 100644
--- a/drivers/phy/mediatek/phy-mtk-pcie.c
+++ b/drivers/phy/mediatek/phy-mtk-pcie.c
@@ -7,7 +7,7 @@
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 0d110e50bbfd..05eab9014132 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -13,8 +13,8 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/mediatek/phy-mtk-ufs.c b/drivers/phy/mediatek/phy-mtk-ufs.c
index fc19e0fa8ed5..0cb5a25b1b7a 100644
--- a/drivers/phy/mediatek/phy-mtk-ufs.c
+++ b/drivers/phy/mediatek/phy-mtk-ufs.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index 5487b9dd1ead..840b7f8a31c5 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
*
*/
+#include <linux/of.h>
#include<linux/phy/phy.h>
#include<linux/platform_device.h>
#include<linux/module.h>
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index d0f4546648f0..1f0f908323f0 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -39,6 +39,7 @@
* Currently, this driver only supports Gen3 SATA mode with external clock.
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 97ca5952e34e..d891058b7c39 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -102,6 +102,16 @@ config PHY_QCOM_QMP_USB
Enable this to support the QMP USB PHY transceiver that is used
with USB3 controllers on Qualcomm chips.
+config PHY_QCOM_QMP_USB_LEGACY
+ tristate "Qualcomm QMP legacy USB PHY Driver"
+ select GENERIC_PHY
+ default n
+ help
+ Enable this legacy driver to support the QMP USB+DisplayPort Combo
+ PHY transceivers working only in USB3 mode on Qualcomm chips. This
+ driver exists only for compatibility with older device trees;
+ existing users have been migrated to the PHY_QCOM_QMP_COMBO driver.
+
endif # PHY_QCOM_QMP
config PHY_QCOM_QUSB2
@@ -133,6 +143,17 @@ config PHY_QCOM_EUSB2_REPEATER
PMICs. The repeater is paired with a Synopsys eUSB2 Phy
on Qualcomm SOCs.
+config PHY_QCOM_M31_USB
+ tristate "Qualcomm M31 HS PHY driver support"
+ depends on USB && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable this to support M31 HS PHY transceivers on Qualcomm chips
+ with a DWC3 USB core. It handles PHY initialization and the clock
+ and power management required after resetting the hardware. This
+ driver is required even for peripheral-only or host-only mode
+ configurations.
+
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index b030858e0f8d..ffd609ac6233 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o
obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
+obj-$(CONFIG_PHY_QCOM_M31_USB) += phy-qcom-m31.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
obj-$(CONFIG_PHY_QCOM_QMP_COMBO) += phy-qcom-qmp-combo.o
@@ -11,6 +12,7 @@ obj-$(CONFIG_PHY_QCOM_QMP_PCIE) += phy-qcom-qmp-pcie.o
obj-$(CONFIG_PHY_QCOM_QMP_PCIE_8996) += phy-qcom-qmp-pcie-msm8996.o
obj-$(CONFIG_PHY_QCOM_QMP_UFS) += phy-qcom-qmp-ufs.o
obj-$(CONFIG_PHY_QCOM_QMP_USB) += phy-qcom-qmp-usb.o
+obj-$(CONFIG_PHY_QCOM_QMP_USB_LEGACY) += phy-qcom-qmp-usb-legacy.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
obj-$(CONFIG_PHY_QCOM_SNPS_EUSB2) += phy-qcom-snps-eusb2.o
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c
index 09a77e556ece..f8d0199c6e78 100644
--- a/drivers/phy/qualcomm/phy-ath79-usb.c
+++ b/drivers/phy/qualcomm/phy-ath79-usb.c
@@ -5,6 +5,7 @@
* Copyright (C) 2015-2018 Alban Bedel <albeu@free.fr>
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c
index 5c4305df7d53..8e5078304646 100644
--- a/drivers/phy/qualcomm/phy-qcom-edp.c
+++ b/drivers/phy/qualcomm/phy-qcom-edp.c
@@ -13,8 +13,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index 90f8543ba265..52c275fbb2a1 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -8,7 +8,6 @@
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
/* eUSB2 status registers */
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
index d3e7d5e1d1b6..da6f290af722 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
@@ -13,8 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index 7bacc527fbad..06392ed7c91b 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -4,7 +4,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
new file mode 100644
index 000000000000..ed08072ca032
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2014-2023, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define USB2PHY_PORT_UTMI_CTRL1 0x40
+
+#define USB2PHY_PORT_UTMI_CTRL2 0x44
+ #define UTMI_ULPI_SEL BIT(7)
+ #define UTMI_TEST_MUX_SEL BIT(6)
+
+#define HS_PHY_CTRL_REG 0x10
+ #define UTMI_OTG_VBUS_VALID BIT(20)
+ #define SW_SESSVLD_SEL BIT(28)
+
+#define USB_PHY_UTMI_CTRL0 0x3c
+
+#define USB_PHY_UTMI_CTRL5 0x50
+ #define POR_EN BIT(1)
+
+#define USB_PHY_HS_PHY_CTRL_COMMON0 0x54
+ #define COMMONONN BIT(7)
+ #define FSEL BIT(4)
+ #define RETENABLEN BIT(3)
+ #define FREQ_24MHZ (BIT(6) | BIT(4))
+
+#define USB_PHY_HS_PHY_CTRL2 0x64
+ #define USB2_SUSPEND_N_SEL BIT(3)
+ #define USB2_SUSPEND_N BIT(2)
+ #define USB2_UTMI_CLK_EN BIT(1)
+
+#define USB_PHY_CFG0 0x94
+ #define UTMI_PHY_OVERRIDE_EN BIT(1)
+
+#define USB_PHY_REFCLK_CTRL 0xa0
+ #define CLKCORE BIT(1)
+
+#define USB2PHY_PORT_POWERDOWN 0xa4
+ #define POWER_UP BIT(0)
+ #define POWER_DOWN 0
+
+#define USB_PHY_FSEL_SEL 0xb8
+ #define FREQ_SEL BIT(0)
+
+#define USB2PHY_USB_PHY_M31_XCFGI_1 0xbc
+ #define USB2_0_TX_ENABLE BIT(2)
+
+#define USB2PHY_USB_PHY_M31_XCFGI_4 0xc8
+ #define HSTX_SLEW_RATE_565PS GENMASK(1, 0)
+ #define PLL_CHARGING_PUMP_CURRENT_35UA GENMASK(4, 3)
+ #define ODT_VALUE_38_02_OHM GENMASK(7, 6)
+
+#define USB2PHY_USB_PHY_M31_XCFGI_5 0xcc
+ #define ODT_VALUE_45_02_OHM BIT(2)
+ #define HSTX_PRE_EMPHASIS_LEVEL_0_55MA BIT(0)
+
+#define USB2PHY_USB_PHY_M31_XCFGI_11 0xe4
+ #define XCFG_COARSE_TUNE_NUM BIT(1)
+ #define XCFG_FINE_TUNE_NUM BIT(3)
+
+struct m31_phy_regs {
+ u32 off;
+ u32 val;
+ u32 delay;
+};
+
+struct m31_priv_data {
+ bool ulpi_mode;
+ const struct m31_phy_regs *regs;
+ unsigned int nregs;
+};
+
+static const struct m31_phy_regs m31_ipq5332_regs[] = {
+ {
+ USB_PHY_CFG0,
+ UTMI_PHY_OVERRIDE_EN,
+ 0
+ },
+ {
+ USB_PHY_UTMI_CTRL5,
+ POR_EN,
+ 15
+ },
+ {
+ USB_PHY_FSEL_SEL,
+ FREQ_SEL,
+ 0
+ },
+ {
+ USB_PHY_HS_PHY_CTRL_COMMON0,
+ COMMONONN | FREQ_24MHZ | RETENABLEN,
+ 0
+ },
+ {
+ USB_PHY_UTMI_CTRL5,
+ POR_EN,
+ 0
+ },
+ {
+ USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N | USB2_UTMI_CLK_EN,
+ 0
+ },
+ {
+ USB2PHY_USB_PHY_M31_XCFGI_11,
+ XCFG_COARSE_TUNE_NUM | XCFG_FINE_TUNE_NUM,
+ 0
+ },
+ {
+ USB2PHY_USB_PHY_M31_XCFGI_4,
+ HSTX_SLEW_RATE_565PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM,
+ 0
+ },
+ {
+ USB2PHY_USB_PHY_M31_XCFGI_1,
+ USB2_0_TX_ENABLE,
+ 0
+ },
+ {
+ USB2PHY_USB_PHY_M31_XCFGI_5,
+ ODT_VALUE_45_02_OHM | HSTX_PRE_EMPHASIS_LEVEL_0_55MA,
+ 4
+ },
+ {
+ USB_PHY_UTMI_CTRL5,
+ 0x0,
+ 0
+ },
+ {
+ USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N | USB2_UTMI_CLK_EN,
+ 0
+ },
+};
+
+struct m31usb_phy {
+ struct phy *phy;
+ void __iomem *base;
+ const struct m31_phy_regs *regs;
+ int nregs;
+
+ struct regulator *vreg;
+ struct clk *clk;
+ struct reset_control *reset;
+
+ bool ulpi_mode;
+};
+
+static int m31usb_phy_init(struct phy *phy)
+{
+ struct m31usb_phy *qphy = phy_get_drvdata(phy);
+ const struct m31_phy_regs *regs = qphy->regs;
+ int i, ret;
+
+ ret = regulator_enable(qphy->vreg);
+ if (ret) {
+ dev_err(&phy->dev, "failed to enable regulator, %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(qphy->clk);
+ if (ret) {
+ if (qphy->vreg)
+ regulator_disable(qphy->vreg);
+ dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+ return ret;
+ }
+
+ /* Perform phy reset */
+ reset_control_assert(qphy->reset);
+ udelay(5);
+ reset_control_deassert(qphy->reset);
+
+ /* configure for ULPI mode if requested */
+ if (qphy->ulpi_mode)
+ writel(0x0, qphy->base + USB2PHY_PORT_UTMI_CTRL2);
+
+ /* Enable the PHY */
+ writel(POWER_UP, qphy->base + USB2PHY_PORT_POWERDOWN);
+
+ /* Turn on phy ref clock */
+ for (i = 0; i < qphy->nregs; i++) {
+ writel(regs[i].val, qphy->base + regs[i].off);
+ if (regs[i].delay)
+ udelay(regs[i].delay);
+ }
+
+ return 0;
+}
+
+static int m31usb_phy_shutdown(struct phy *phy)
+{
+ struct m31usb_phy *qphy = phy_get_drvdata(phy);
+
+ /* Disable the PHY */
+ writel_relaxed(POWER_DOWN, qphy->base + USB2PHY_PORT_POWERDOWN);
+
+ clk_disable_unprepare(qphy->clk);
+
+ regulator_disable(qphy->vreg);
+
+ return 0;
+}
+
+static const struct phy_ops m31usb_phy_gen_ops = {
+ .power_on = m31usb_phy_init,
+ .power_off = m31usb_phy_shutdown,
+ .owner = THIS_MODULE,
+};
+
+static int m31usb_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ const struct m31_priv_data *data;
+ struct device *dev = &pdev->dev;
+ struct m31usb_phy *qphy;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qphy->base))
+ return PTR_ERR(qphy->base);
+
+ qphy->reset = devm_reset_control_get_exclusive_by_index(dev, 0);
+ if (IS_ERR(qphy->reset))
+ return PTR_ERR(qphy->reset);
+
+ qphy->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(qphy->clk))
+ return dev_err_probe(dev, PTR_ERR(qphy->clk),
+ "failed to get clk\n");
+
+ data = of_device_get_match_data(dev);
+ qphy->regs = data->regs;
+ qphy->nregs = data->nregs;
+ qphy->ulpi_mode = data->ulpi_mode;
+
+ qphy->phy = devm_phy_create(dev, NULL, &m31usb_phy_gen_ops);
+ if (IS_ERR(qphy->phy))
+ return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ "failed to create phy\n");
+
+ qphy->vreg = devm_regulator_get(dev, "vdda-phy");
+ if (IS_ERR(qphy->vreg))
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
+ "failed to get vreg\n");
+
+ phy_set_drvdata(qphy->phy, qphy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered M31 USB phy\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct m31_priv_data m31_ipq5332_data = {
+ .ulpi_mode = false,
+ .regs = m31_ipq5332_regs,
+ .nregs = ARRAY_SIZE(m31_ipq5332_regs),
+};
+
+static const struct of_device_id m31usb_phy_id_table[] = {
+ { .compatible = "qcom,ipq5332-usb-hsphy", .data = &m31_ipq5332_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, m31usb_phy_id_table);
+
+static struct platform_driver m31usb_phy_driver = {
+ .probe = m31usb_phy_probe,
+ .driver = {
+ .name = "qcom-m31usb-phy",
+ .of_match_table = m31usb_phy_id_table,
+ },
+};
+
+module_platform_driver(m31usb_phy_driver);
+
+MODULE_DESCRIPTION("USB2 Qualcomm M31 HSPHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index bebce8c591a3..cbb28afce135 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -106,6 +105,20 @@ enum qphy_reg_layout {
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
QPHY_PCS_POWER_DOWN_CONTROL,
+
+ QPHY_COM_RESETSM_CNTRL,
+ QPHY_COM_C_READY_STATUS,
+ QPHY_COM_CMN_STATUS,
+ QPHY_COM_BIAS_EN_CLKBUFLR_EN,
+
+ QPHY_DP_PHY_STATUS,
+
+ QPHY_TX_TX_POL_INV,
+ QPHY_TX_TX_DRV_LVL,
+ QPHY_TX_TX_EMP_POST1_LVL,
+ QPHY_TX_HIGHZ_DRVR_EN,
+ QPHY_TX_TRANSCEIVER_BIAS_EN,
+
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -117,9 +130,22 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V3_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V3_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V3_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V3_DP_PHY_STATUS,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V3_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V3_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V3_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V3_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V3_TX_TRANSCEIVER_BIAS_EN,
};
-static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+static const unsigned int qmp_v45_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = QPHY_V4_PCS_SW_RESET,
[QPHY_START_CTRL] = QPHY_V4_PCS_START_CONTROL,
[QPHY_PCS_STATUS] = QPHY_V4_PCS_PCS_STATUS1,
@@ -128,6 +154,67 @@ static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
/* In PCS_USB */
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V4_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V4_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V4_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V4_DP_PHY_STATUS,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V4_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V4_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V4_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V4_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V4_TX_TRANSCEIVER_BIAS_EN,
+};
+
+static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V5_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V5_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V5_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V5_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V5_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V5_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V5_DP_PHY_STATUS,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V5_5NM_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V5_5NM_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN,
+};
+
+static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V5_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V5_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V5_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V6_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V6_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V6_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V6_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_TX_TRANSCEIVER_BIAS_EN,
};
static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
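
These added QPHY_COM_*/QPHY_TX_* layout entries let the shared DP helpers later in this patch resolve version-specific offsets through cfg->regs[] instead of hard-coding QSERDES_Vn_* constants. An abstract sketch of the indirection, with made-up offsets:

#include <linux/io.h>
#include <linux/types.h>

/* Abstract sketch of the regs-layout indirection; offsets below are made up. */
enum layout_reg { REG_COM_C_READY_STATUS, REG_DP_PHY_STATUS, LAYOUT_SIZE };

struct example_phy_cfg {
	const unsigned int *regs;		/* per-IP-version offset table */
};

static const unsigned int example_v4_layout[LAYOUT_SIZE] = {
	[REG_COM_C_READY_STATUS] = 0x178,
	[REG_DP_PHY_STATUS]      = 0x0c0,
};

static const struct example_phy_cfg example_v4_cfg = {
	.regs = example_v4_layout,
};

static u32 example_read_dp_status(const struct example_phy_cfg *cfg,
				  void __iomem *dp_phy)
{
	/* one shared code path; version differences live in the offset table */
	return readl(dp_phy + cfg->regs[REG_DP_PHY_STATUS]);
}
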
@@ -1271,9 +1358,6 @@ struct qmp_phy_cfg {
int (*calibrate_dp_phy)(struct qmp_combo *qmp);
void (*dp_aux_init)(struct qmp_combo *qmp);
- /* clock ids to be requested */
- const char * const *clk_list;
- int num_clks;
/* resets to be requested */
const char * const *reset_list;
int num_resets;
@@ -1315,6 +1399,7 @@ struct qmp_combo {
struct clk *pipe_clk;
struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data *resets;
struct regulator_bulk_data *vregs;
@@ -1350,11 +1435,6 @@ static void qmp_v4_configure_dp_tx(struct qmp_combo *qmp);
static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp);
static int qmp_v4_calibrate_dp_phy(struct qmp_combo *qmp);
-static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp);
-
-static void qmp_v6_dp_aux_init(struct qmp_combo *qmp);
-static int qmp_v6_configure_dp_phy(struct qmp_combo *qmp);
-
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
@@ -1380,19 +1460,10 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
}
/* list of clocks required by phy */
-static const char * const qmp_v3_phy_clk_l[] = {
+static const char * const qmp_combo_phy_clk_l[] = {
"aux", "cfg_ahb", "ref", "com_aux",
};
-static const char * const qmp_v4_phy_clk_l[] = {
- "aux", "ref", "com_aux",
-};
-
-/* the primary usb3 phy on sm8250 doesn't have a ref clock */
-static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
- "aux", "ref_clk_src", "com_aux"
-};
-
/* list of resets */
static const char * const msm8996_usb3phy_reset_l[] = {
"phy", "common",
@@ -1433,6 +1504,8 @@ static const struct qmp_combo_offsets qmp_combo_offsets_v5 = {
};
static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
.tx_tbl = qmp_v3_usb3_tx_tbl,
@@ -1466,8 +1539,6 @@ static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v3_configure_dp_phy,
.calibrate_dp_phy = qmp_v3_calibrate_dp_phy,
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = sc7180_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -1478,6 +1549,8 @@ static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = {
};
static const struct qmp_phy_cfg sdm845_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
.tx_tbl = qmp_v3_usb3_tx_tbl,
@@ -1511,8 +1584,6 @@ static const struct qmp_phy_cfg sdm845_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v3_configure_dp_phy,
.calibrate_dp_phy = qmp_v3_calibrate_dp_phy,
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -1523,6 +1594,8 @@ static const struct qmp_phy_cfg sdm845_usb3dpphy_cfg = {
};
static const struct qmp_phy_cfg sc8180x_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
.tx_tbl = sm8150_usb3_tx_tbl,
@@ -1558,13 +1631,11 @@ static const struct qmp_phy_cfg sc8180x_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v45_usb3phy_regs_layout,
.pcs_usb_offset = 0x300,
.has_pwrdn_delay = true,
@@ -1603,16 +1674,14 @@ static const struct qmp_phy_cfg sc8280xp_usb43dpphy_cfg = {
.dp_aux_init = qmp_v4_dp_aux_init,
.configure_dp_tx = qmp_v4_configure_dp_tx,
- .configure_dp_phy = qmp_v5_configure_dp_phy,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v5_5nm_usb3phy_regs_layout,
};
static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
@@ -1651,8 +1720,6 @@ static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v3_configure_dp_phy,
.calibrate_dp_phy = qmp_v3_calibrate_dp_phy,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -1661,6 +1728,8 @@ static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
};
static const struct qmp_phy_cfg sm8250_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
.tx_tbl = sm8250_usb3_tx_tbl,
@@ -1696,13 +1765,11 @@ static const struct qmp_phy_cfg sm8250_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .clk_list = qmp_v4_sm8250_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v45_usb3phy_regs_layout,
.pcs_usb_offset = 0x300,
.has_pwrdn_delay = true,
@@ -1746,13 +1813,11 @@ static const struct qmp_phy_cfg sm8350_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v45_usb3phy_regs_layout,
.has_pwrdn_delay = true,
};
@@ -1790,14 +1855,12 @@ static const struct qmp_phy_cfg sm8550_usb3dpphy_cfg = {
.swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
.pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
- .dp_aux_init = qmp_v6_dp_aux_init,
+ .dp_aux_init = qmp_v4_dp_aux_init,
.configure_dp_tx = qmp_v4_configure_dp_tx,
- .configure_dp_phy = qmp_v6_configure_dp_phy,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .regs = qmp_v4_usb3phy_regs_layout,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .regs = qmp_v6_usb3phy_regs_layout,
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -1865,6 +1928,8 @@ static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp)
static void qmp_v3_dp_aux_init(struct qmp_combo *qmp)
{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
@@ -1872,7 +1937,7 @@ static void qmp_v3_dp_aux_init(struct qmp_combo *qmp)
/* Turn on BIAS current for PHY/PLL */
writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX |
QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL,
- qmp->dp_serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
+ qmp->dp_serdes + cfg->regs[QPHY_COM_BIAS_EN_CLKBUFLR_EN]);
writel(DP_PHY_PD_CTL_PSR_PWRDN, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
@@ -1886,7 +1951,7 @@ static void qmp_v3_dp_aux_init(struct qmp_combo *qmp)
QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN |
QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL |
QSERDES_V3_COM_CLKBUF_RX_DRIVE_L,
- qmp->dp_serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
+ qmp->dp_serdes + cfg->regs[QPHY_COM_BIAS_EN_CLKBUFLR_EN]);
writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG0);
writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1);
@@ -1906,8 +1971,7 @@ static void qmp_v3_dp_aux_init(struct qmp_combo *qmp)
qmp->dp_dp_phy + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK);
}
-static int qmp_combo_configure_dp_swing(struct qmp_combo *qmp,
- unsigned int drv_lvl_reg, unsigned int emp_post_reg)
+static int qmp_combo_configure_dp_swing(struct qmp_combo *qmp)
{
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
const struct qmp_phy_cfg *cfg = qmp->cfg;
@@ -1936,10 +2000,10 @@ static int qmp_combo_configure_dp_swing(struct qmp_combo *qmp,
voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN;
pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN;
- writel(voltage_swing_cfg, qmp->dp_tx + drv_lvl_reg);
- writel(pre_emphasis_cfg, qmp->dp_tx + emp_post_reg);
- writel(voltage_swing_cfg, qmp->dp_tx2 + drv_lvl_reg);
- writel(pre_emphasis_cfg, qmp->dp_tx2 + emp_post_reg);
+ writel(voltage_swing_cfg, qmp->dp_tx + cfg->regs[QPHY_TX_TX_DRV_LVL]);
+ writel(pre_emphasis_cfg, qmp->dp_tx + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
+ writel(voltage_swing_cfg, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_DRV_LVL]);
+ writel(pre_emphasis_cfg, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
return 0;
}
@@ -1949,8 +2013,7 @@ static void qmp_v3_configure_dp_tx(struct qmp_combo *qmp)
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 bias_en, drvr_en;
- if (qmp_combo_configure_dp_swing(qmp, QSERDES_V3_TX_TX_DRV_LVL,
- QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0)
+ if (qmp_combo_configure_dp_swing(qmp) < 0)
return;
if (dp_opts->lanes == 1) {
@@ -1991,17 +2054,12 @@ static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
return reverse;
}
-static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
+static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
{
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- u32 phy_vco_div, status;
+ u32 phy_vco_div;
unsigned long pixel_freq;
- qmp_combo_configure_dp_mode(qmp);
-
- writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
- writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
-
switch (dp_opts->link_rate) {
case 1620:
phy_vco_div = 0x1;
@@ -2023,20 +2081,38 @@ static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
/* Other link rates aren't supported */
return -EINVAL;
}
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_VCO_DIV);
+ writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
+ return 0;
+}
+
+static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ u32 status;
+ int ret;
+
+ qmp_combo_configure_dp_mode(qmp);
+
+ writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
+ writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
+
+ ret = qmp_combo_configure_dp_clocks(qmp);
+ if (ret)
+ return ret;
+
writel(0x04, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG2);
writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x05, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x09, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- writel(0x20, qmp->dp_serdes + QSERDES_V3_COM_RESETSM_CNTRL);
+ writel(0x20, qmp->dp_serdes + cfg->regs[QPHY_COM_RESETSM_CNTRL]);
- if (readl_poll_timeout(qmp->dp_serdes + QSERDES_V3_COM_C_READY_STATUS,
+ if (readl_poll_timeout(qmp->dp_serdes + cfg->regs[QPHY_COM_C_READY_STATUS],
status,
((status & BIT(0)) > 0),
500,
@@ -2045,7 +2121,7 @@ static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V3_DP_PHY_STATUS,
+ if (readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(1)) > 0),
500,
@@ -2056,7 +2132,7 @@ static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
udelay(2000);
writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- return readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V3_DP_PHY_STATUS,
+ return readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(1)) > 0),
500,
@@ -2083,39 +2159,14 @@ static int qmp_v3_calibrate_dp_phy(struct qmp_combo *qmp)
static void qmp_v4_dp_aux_init(struct qmp_combo *qmp)
{
- writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
- DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
- qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
-
- /* Turn on BIAS current for PHY/PLL */
- writel(0x17, qmp->dp_serdes + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
-
- writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG0);
- writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1);
- writel(0xa4, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG2);
- writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG3);
- writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG4);
- writel(0x26, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG5);
- writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG6);
- writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG7);
- writel(0xb7, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG8);
- writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG9);
- qmp->dp_aux_cfg = 0;
-
- writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
- PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
- PHY_AUX_REQ_ERR_MASK,
- qmp->dp_dp_phy + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK);
-}
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
-static void qmp_v6_dp_aux_init(struct qmp_combo *qmp)
-{
writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
/* Turn on BIAS current for PHY/PLL */
- writel(0x17, qmp->dp_serdes + QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN);
+ writel(0x17, qmp->dp_serdes + cfg->regs[QPHY_COM_BIAS_EN_CLKBUFLR_EN]);
writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG0);
writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1);
@@ -2137,26 +2188,23 @@ static void qmp_v6_dp_aux_init(struct qmp_combo *qmp)
static void qmp_v4_configure_dp_tx(struct qmp_combo *qmp)
{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
/* Program default values before writing proper values */
- writel(0x27, qmp->dp_tx + QSERDES_V4_TX_TX_DRV_LVL);
- writel(0x27, qmp->dp_tx2 + QSERDES_V4_TX_TX_DRV_LVL);
+ writel(0x27, qmp->dp_tx + cfg->regs[QPHY_TX_TX_DRV_LVL]);
+ writel(0x27, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_DRV_LVL]);
- writel(0x20, qmp->dp_tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- writel(0x20, qmp->dp_tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qmp->dp_tx + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
+ writel(0x20, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
- qmp_combo_configure_dp_swing(qmp, QSERDES_V4_TX_TX_DRV_LVL,
- QSERDES_V4_TX_TX_EMP_POST1_LVL);
+ qmp_combo_configure_dp_swing(qmp);
}
-static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
- unsigned int com_resetm_ctrl_reg,
- unsigned int com_c_ready_status_reg,
- unsigned int com_cmn_status_reg,
- unsigned int dp_phy_status_reg)
+static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp)
{
- const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- u32 phy_vco_div, status;
- unsigned long pixel_freq;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ u32 status;
+ int ret;
writel(0x0f, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_CFG_1);
@@ -2168,54 +2216,32 @@ static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
writel(0x05, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL);
writel(0x05, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL);
- switch (dp_opts->link_rate) {
- case 1620:
- phy_vco_div = 0x1;
- pixel_freq = 1620000000UL / 2;
- break;
- case 2700:
- phy_vco_div = 0x1;
- pixel_freq = 2700000000UL / 2;
- break;
- case 5400:
- phy_vco_div = 0x2;
- pixel_freq = 5400000000UL / 4;
- break;
- case 8100:
- phy_vco_div = 0x0;
- pixel_freq = 8100000000UL / 6;
- break;
- default:
- /* Other link rates aren't supported */
- return -EINVAL;
- }
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
-
- clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
- clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
+ ret = qmp_combo_configure_dp_clocks(qmp);
+ if (ret)
+ return ret;
writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x05, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x09, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- writel(0x20, qmp->dp_serdes + com_resetm_ctrl_reg);
+ writel(0x20, qmp->dp_serdes + cfg->regs[QPHY_COM_RESETSM_CNTRL]);
- if (readl_poll_timeout(qmp->dp_serdes + com_c_ready_status_reg,
+ if (readl_poll_timeout(qmp->dp_serdes + cfg->regs[QPHY_COM_C_READY_STATUS],
status,
((status & BIT(0)) > 0),
500,
10000))
return -ETIMEDOUT;
- if (readl_poll_timeout(qmp->dp_serdes + com_cmn_status_reg,
+ if (readl_poll_timeout(qmp->dp_serdes + cfg->regs[QPHY_COM_CMN_STATUS],
status,
((status & BIT(0)) > 0),
500,
10000))
return -ETIMEDOUT;
- if (readl_poll_timeout(qmp->dp_serdes + com_cmn_status_reg,
+ if (readl_poll_timeout(qmp->dp_serdes + cfg->regs[QPHY_COM_CMN_STATUS],
status,
((status & BIT(1)) > 0),
500,
@@ -2224,14 +2250,14 @@ static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- if (readl_poll_timeout(qmp->dp_dp_phy + dp_phy_status_reg,
+ if (readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(0)) > 0),
500,
10000))
return -ETIMEDOUT;
- if (readl_poll_timeout(qmp->dp_dp_phy + dp_phy_status_reg,
+ if (readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(1)) > 0),
500,
@@ -2243,16 +2269,14 @@ static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp)
{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
u32 status;
int ret;
- ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V4_COM_RESETSM_CNTRL,
- QSERDES_V4_COM_C_READY_STATUS,
- QSERDES_V4_COM_CMN_STATUS,
- QSERDES_V4_DP_PHY_STATUS);
+ ret = qmp_v456_configure_dp_phy(qmp);
if (ret < 0)
return ret;
@@ -2278,150 +2302,32 @@ static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp)
drvr1_en = 0x10;
}
- writel(drvr0_en, qmp->dp_tx + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias0_en, qmp->dp_tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
- writel(drvr1_en, qmp->dp_tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias1_en, qmp->dp_tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
-
- writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- udelay(2000);
- writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
-
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS,
- status,
- ((status & BIT(1)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- writel(0x0a, qmp->dp_tx + QSERDES_V4_TX_TX_POL_INV);
- writel(0x0a, qmp->dp_tx2 + QSERDES_V4_TX_TX_POL_INV);
-
- writel(0x27, qmp->dp_tx + QSERDES_V4_TX_TX_DRV_LVL);
- writel(0x27, qmp->dp_tx2 + QSERDES_V4_TX_TX_DRV_LVL);
-
- writel(0x20, qmp->dp_tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- writel(0x20, qmp->dp_tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
-
- return 0;
-}
-
-static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp)
-{
- bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
- const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- u32 status;
- int ret;
-
- ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V4_COM_RESETSM_CNTRL,
- QSERDES_V4_COM_C_READY_STATUS,
- QSERDES_V4_COM_CMN_STATUS,
- QSERDES_V4_DP_PHY_STATUS);
- if (ret < 0)
- return ret;
-
- if (dp_opts->lanes == 1) {
- bias0_en = reverse ? 0x3e : 0x1a;
- drvr0_en = reverse ? 0x13 : 0x10;
- bias1_en = reverse ? 0x15 : 0x3e;
- drvr1_en = reverse ? 0x10 : 0x13;
- } else if (dp_opts->lanes == 2) {
- bias0_en = reverse ? 0x3f : 0x15;
- drvr0_en = 0x10;
- bias1_en = reverse ? 0x15 : 0x3f;
- drvr1_en = 0x10;
- } else {
- bias0_en = 0x3f;
- bias1_en = 0x3f;
- drvr0_en = 0x10;
- drvr1_en = 0x10;
- }
-
- writel(drvr0_en, qmp->dp_tx + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN);
- writel(bias0_en, qmp->dp_tx + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN);
- writel(drvr1_en, qmp->dp_tx2 + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN);
- writel(bias1_en, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr0_en, qmp->dp_tx + cfg->regs[QPHY_TX_HIGHZ_DRVR_EN]);
+ writel(bias0_en, qmp->dp_tx + cfg->regs[QPHY_TX_TRANSCEIVER_BIAS_EN]);
+ writel(drvr1_en, qmp->dp_tx2 + cfg->regs[QPHY_TX_HIGHZ_DRVR_EN]);
+ writel(bias1_en, qmp->dp_tx2 + cfg->regs[QPHY_TX_TRANSCEIVER_BIAS_EN]);
writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
udelay(2000);
writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS,
+ if (readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(1)) > 0),
500,
10000))
return -ETIMEDOUT;
- writel(0x0a, qmp->dp_tx + QSERDES_V5_5NM_TX_TX_POL_INV);
- writel(0x0a, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TX_POL_INV);
+ writel(0x0a, qmp->dp_tx + cfg->regs[QPHY_TX_TX_POL_INV]);
+ writel(0x0a, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_POL_INV]);
- writel(0x27, qmp->dp_tx + QSERDES_V5_5NM_TX_TX_DRV_LVL);
- writel(0x27, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TX_DRV_LVL);
+ writel(0x27, qmp->dp_tx + cfg->regs[QPHY_TX_TX_DRV_LVL]);
+ writel(0x27, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_DRV_LVL]);
- writel(0x20, qmp->dp_tx + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL);
- writel(0x20, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qmp->dp_tx + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
+ writel(0x20, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
return 0;
-}
-
-static int qmp_v6_configure_dp_phy(struct qmp_combo *qmp)
-{
- bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
- const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- u32 status;
- int ret;
-
- ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V6_COM_RESETSM_CNTRL,
- QSERDES_V6_COM_C_READY_STATUS,
- QSERDES_V6_COM_CMN_STATUS,
- QSERDES_V6_DP_PHY_STATUS);
- if (ret < 0)
- return ret;
-
- if (dp_opts->lanes == 1) {
- bias0_en = reverse ? 0x3e : 0x1a;
- drvr0_en = reverse ? 0x13 : 0x10;
- bias1_en = reverse ? 0x15 : 0x3e;
- drvr1_en = reverse ? 0x10 : 0x13;
- } else if (dp_opts->lanes == 2) {
- bias0_en = reverse ? 0x3f : 0x15;
- drvr0_en = 0x10;
- bias1_en = reverse ? 0x15 : 0x3f;
- drvr1_en = 0x10;
- } else {
- bias0_en = 0x3f;
- bias1_en = 0x3f;
- drvr0_en = 0x10;
- drvr1_en = 0x10;
- }
-
- writel(drvr0_en, qmp->dp_tx + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias0_en, qmp->dp_tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
- writel(drvr1_en, qmp->dp_tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias1_en, qmp->dp_tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
-
- writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- udelay(2000);
- writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
-
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V6_DP_PHY_STATUS,
- status,
- ((status & BIT(1)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- writel(0x0a, qmp->dp_tx + QSERDES_V4_TX_TX_POL_INV);
- writel(0x0a, qmp->dp_tx2 + QSERDES_V4_TX_TX_POL_INV);
-
- writel(0x27, qmp->dp_tx + QSERDES_V4_TX_TX_DRV_LVL);
- writel(0x27, qmp->dp_tx2 + QSERDES_V4_TX_TX_DRV_LVL);
-
- writel(0x20, qmp->dp_tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- writel(0x20, qmp->dp_tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
return 0;
}
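
For illustration only: the rewritten sequence above looks up version-specific TX and DP-PHY register offsets through cfg->regs[...] and then polls bit 1 of the DP PHY status register with readl_poll_timeout(). A minimal standalone sketch of that pattern follows; the enum, struct and function names (my_phy_reg, my_phy_cfg, my_dp_phy_wait_ready) are made-up stand-ins, not the driver's actual definitions.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

/* Hypothetical per-version register layout table (illustrative only). */
enum my_phy_reg { MY_DP_PHY_STATUS, MY_LAYOUT_SIZE };

struct my_phy_cfg {
	const unsigned int *regs;	/* indexed by enum my_phy_reg */
};

/*
 * Wait for bit 1 of the DP PHY status register to assert, polling every
 * 500 us and giving up after 10 ms; returns 0 or -ETIMEDOUT.
 */
static int my_dp_phy_wait_ready(void __iomem *dp_phy,
				const struct my_phy_cfg *cfg)
{
	u32 status;

	return readl_poll_timeout(dp_phy + cfg->regs[MY_DP_PHY_STATUS],
				  status, status & BIT(1), 500, 10000);
}
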
@@ -2507,7 +2413,7 @@ static int qmp_combo_com_init(struct qmp_combo *qmp, bool force)
goto err_disable_regulators;
}
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
if (ret)
goto err_assert_reset;
@@ -2557,7 +2463,7 @@ static int qmp_combo_com_exit(struct qmp_combo *qmp, bool force)
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
@@ -2836,7 +2742,6 @@ static void qmp_combo_disable_autonomous_mode(struct qmp_combo *qmp)
static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
{
struct qmp_combo *qmp = dev_get_drvdata(dev);
- const struct qmp_phy_cfg *cfg = qmp->cfg;
dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
@@ -2848,7 +2753,7 @@ static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
qmp_combo_enable_autonomous_mode(qmp);
clk_disable_unprepare(qmp->pipe_clk);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
return 0;
}
@@ -2856,7 +2761,6 @@ static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
static int __maybe_unused qmp_combo_runtime_resume(struct device *dev)
{
struct qmp_combo *qmp = dev_get_drvdata(dev);
- const struct qmp_phy_cfg *cfg = qmp->cfg;
int ret = 0;
dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
@@ -2866,14 +2770,14 @@ static int __maybe_unused qmp_combo_runtime_resume(struct device *dev)
return 0;
}
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
if (ret)
return ret;
ret = clk_prepare_enable(qmp->pipe_clk);
if (ret) {
dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
return ret;
}
@@ -2944,9 +2848,8 @@ static int qmp_combo_reset_init(struct qmp_combo *qmp)
static int qmp_combo_clk_init(struct qmp_combo *qmp)
{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
struct device *dev = qmp->dev;
- int num = cfg->num_clks;
+ int num = ARRAY_SIZE(qmp_combo_phy_clk_l);
int i;
qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
@@ -2954,9 +2857,11 @@ static int qmp_combo_clk_init(struct qmp_combo *qmp)
return -ENOMEM;
for (i = 0; i < num; i++)
- qmp->clks[i].id = cfg->clk_list[i];
+ qmp->clks[i].id = qmp_combo_phy_clk_l[i];
+
+ qmp->num_clks = num;
- return devm_clk_bulk_get(dev, num, qmp->clks);
+ return devm_clk_bulk_get_optional(dev, num, qmp->clks);
}
static void phy_clk_release_provider(void *res)
@@ -3421,6 +3326,12 @@ static int qmp_combo_parse_dt_legacy(struct qmp_combo *qmp, struct device_node *
if (ret)
return ret;
+ ret = devm_clk_bulk_get_all(qmp->dev, &qmp->clks);
+ if (ret < 0)
+ return ret;
+
+ qmp->num_clks = ret;
+
return 0;
}
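
As a reference for the legacy-DT hunk above: devm_clk_bulk_get_all() returns the number of clocks found in the node (or a negative errno), and that count is what gets stored for the later clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() calls. A minimal sketch of the same bookkeeping, with a made-up container struct (my_phy):

#include <linux/clk.h>
#include <linux/device.h>

struct my_phy {
	struct device *dev;
	struct clk_bulk_data *clks;
	int num_clks;
};

static int my_phy_get_clks(struct my_phy *phy)
{
	int ret;

	/* Returns the number of clocks found, or a negative errno. */
	ret = devm_clk_bulk_get_all(phy->dev, &phy->clks);
	if (ret < 0)
		return ret;

	phy->num_clks = ret;

	return 0;
}
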
@@ -3431,6 +3342,7 @@ static int qmp_combo_parse_dt(struct qmp_combo *qmp)
const struct qmp_combo_offsets *offs = cfg->offsets;
struct device *dev = qmp->dev;
void __iomem *base;
+ int ret;
if (!offs)
return -EINVAL;
@@ -3460,6 +3372,10 @@ static int qmp_combo_parse_dt(struct qmp_combo *qmp)
}
qmp->dp_dp_phy = base + offs->dp_dp_phy;
+ ret = qmp_combo_clk_init(qmp);
+ if (ret)
+ return ret;
+
qmp->pipe_clk = devm_clk_get(dev, "usb3_pipe");
if (IS_ERR(qmp->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
@@ -3508,10 +3424,6 @@ static int qmp_combo_probe(struct platform_device *pdev)
mutex_init(&qmp->phy_mutex);
- ret = qmp_combo_clk_init(qmp);
- if (ret)
- return ret;
-
ret = qmp_combo_reset_init(qmp);
if (ret)
return ret;
@@ -3603,6 +3515,10 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
.data = &sc7180_usb3dpphy_cfg,
},
{
+ .compatible = "qcom,sc7280-qmp-usb3-dp-phy",
+ .data = &sm8250_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,sc8180x-qmp-usb3-dp-phy",
.data = &sc8180x_usb3dpphy_cfg,
},
@@ -3619,6 +3535,10 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
.data = &sm6350_usb3dpphy_cfg,
},
{
+ .compatible = "qcom,sm8150-qmp-usb3-dp-phy",
+ .data = &sc8180x_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,sm8250-qmp-usb3-dp-phy",
.data = &sm8250_usb3dpphy_cfg,
},
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index 0c603bc06e09..ab61a9c73b18 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index df505279edfd..a63ca7424974 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -13,7 +13,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
@@ -1910,6 +1909,244 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0xf2),
};
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rx_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xb0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B4, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xec),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xf3),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xf8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xec),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x83),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xf5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x5e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x7c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_3, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rx_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x7c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xb0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B4, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xf3),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xee),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x83),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xf9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rc_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+};
+
struct qmp_pcie_offsets {
u16 serdes;
u16 pcs;
@@ -1957,9 +2194,6 @@ struct qmp_phy_cfg {
const struct qmp_phy_init_tbl *serdes_4ln_tbl;
int serdes_4ln_num;
- /* clock ids to be requested */
- const char * const *clk_list;
- int num_clks;
/* resets to be requested */
const char * const *reset_list;
int num_resets;
@@ -2038,20 +2272,8 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
}
/* list of clocks required by phy */
-static const char * const ipq8074_pciephy_clk_l[] = {
- "aux", "cfg_ahb",
-};
-
-static const char * const msm8996_phy_clk_l[] = {
- "aux", "cfg_ahb", "ref",
-};
-
-static const char * const sc8280xp_pciephy_clk_l[] = {
- "aux", "cfg_ahb", "ref", "rchng",
-};
-
-static const char * const sdm845_pciephy_clk_l[] = {
- "aux", "cfg_ahb", "ref", "refgen",
+static const char * const qmp_pciephy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "refgen", "rchng", "phy_aux",
};
/* list of regulators */
@@ -2072,6 +2294,56 @@ static const char * const sdm845_pciephy_reset_l[] = {
"phy",
};
+static const struct qmp_pcie_offsets qmp_pcie_offsets_qhp = {
+ .serdes = 0,
+ .pcs = 0x1800,
+ .tx = 0x0800,
+ /* no .rx for QHP */
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v2 = {
+ .serdes = 0,
+ .pcs = 0x0800,
+ .tx = 0x0200,
+ .rx = 0x0400,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v3 = {
+ .serdes = 0,
+ .pcs = 0x0800,
+ .pcs_misc = 0x0600,
+ .tx = 0x0200,
+ .rx = 0x0400,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v4x1 = {
+ .serdes = 0,
+ .pcs = 0x0800,
+ .pcs_misc = 0x0c00,
+ .tx = 0x0200,
+ .rx = 0x0400,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v4x2 = {
+ .serdes = 0,
+ .pcs = 0x0a00,
+ .pcs_misc = 0x0e00,
+ .tx = 0x0200,
+ .rx = 0x0400,
+ .tx2 = 0x0600,
+ .rx2 = 0x0800,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v4_20 = {
+ .serdes = 0x1000,
+ .pcs = 0x1200,
+ .pcs_misc = 0x1600,
+ .tx = 0x0000,
+ .rx = 0x0200,
+ .tx2 = 0x0800,
+ .rx2 = 0x0a00,
+};
+
static const struct qmp_pcie_offsets qmp_pcie_offsets_v5 = {
.serdes = 0,
.pcs = 0x0200,
@@ -2082,6 +2354,26 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5 = {
.rx2 = 0x1800,
};
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = {
+ .serdes = 0x1000,
+ .pcs = 0x1200,
+ .pcs_misc = 0x1400,
+ .tx = 0x0000,
+ .rx = 0x0200,
+ .tx2 = 0x0800,
+ .rx2 = 0x0a00,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_30 = {
+ .serdes = 0x2000,
+ .pcs = 0x2200,
+ .pcs_misc = 0x2400,
+ .tx = 0x0,
+ .rx = 0x0200,
+ .tx2 = 0x3800,
+ .rx2 = 0x3a00,
+};
+
static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_20 = {
.serdes = 0x1000,
.pcs = 0x1200,
@@ -2096,6 +2388,8 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_20 = {
static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v2,
+
.tbls = {
.serdes = ipq8074_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
@@ -2106,8 +2400,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.pcs = ipq8074_pcie_pcs_tbl,
.pcs_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
},
- .clk_list = ipq8074_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
@@ -2121,6 +2413,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v4x1,
+
.tbls = {
.serdes = ipq8074_pcie_gen3_serdes_tbl,
.serdes_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
@@ -2133,8 +2427,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
.pcs_misc = ipq8074_pcie_gen3_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_misc_tbl),
},
- .clk_list = ipq8074_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
@@ -2150,6 +2442,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v4x1,
+
.tbls = {
.serdes = ipq6018_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
@@ -2162,8 +2456,6 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
.pcs_misc = ipq6018_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl),
},
- .clk_list = ipq8074_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
@@ -2177,6 +2469,8 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v3,
+
.tbls = {
.serdes = sdm845_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
@@ -2189,8 +2483,6 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.pcs_misc = sdm845_qmp_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2204,6 +2496,8 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_qhp,
+
.tbls = {
.serdes = sdm845_qhp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
@@ -2212,8 +2506,6 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
.pcs = sdm845_qhp_pcie_pcs_tbl,
.pcs_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2227,6 +2519,8 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v4x1,
+
.tbls = {
.serdes = sm8250_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
@@ -2249,8 +2543,6 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
.pcs_misc = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2264,6 +2556,8 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
.lanes = 2,
+ .offsets = &qmp_pcie_offsets_v4x2,
+
.tbls = {
.serdes = sm8250_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
@@ -2286,8 +2580,6 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
.pcs_misc = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2301,6 +2593,8 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v3,
+
.tbls = {
.serdes = msm8998_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
@@ -2311,8 +2605,6 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.pcs = msm8998_pcie_pcs_tbl,
.pcs_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl),
},
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2328,6 +2620,8 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
.lanes = 2,
+ .offsets = &qmp_pcie_offsets_v4x2,
+
.tbls = {
.serdes = sc8180x_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
@@ -2340,8 +2634,6 @@ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
.pcs_misc = sc8180x_qmp_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2375,8 +2667,6 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x1_pciephy_cfg = {
.serdes_num = ARRAY_SIZE(sc8280xp_qmp_gen3x1_pcie_rc_serdes_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2410,8 +2700,6 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x2_pciephy_cfg = {
.serdes_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_rc_serdes_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2448,8 +2736,6 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x4_pciephy_cfg = {
.serdes_4ln_tbl = sc8280xp_qmp_gen3x4_pcie_serdes_4ln_tbl,
.serdes_4ln_num = ARRAY_SIZE(sc8280xp_qmp_gen3x4_pcie_serdes_4ln_tbl),
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2463,6 +2749,8 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x4_pciephy_cfg = {
static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.lanes = 2,
+ .offsets = &qmp_pcie_offsets_v4_20,
+
.tbls = {
.serdes = sdx55_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
@@ -2490,8 +2778,6 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.pcs_misc_num = ARRAY_SIZE(sdx55_qmp_pcie_ep_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2527,8 +2813,6 @@ static const struct qmp_phy_cfg sm8350_qmp_gen3x1_pciephy_cfg = {
.rx_num = ARRAY_SIZE(sm8350_qmp_gen3x1_pcie_rc_rx_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2564,8 +2848,6 @@ static const struct qmp_phy_cfg sm8350_qmp_gen3x2_pciephy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_qmp_gen3x2_pcie_rc_pcs_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2593,8 +2875,6 @@ static const struct qmp_phy_cfg sdx65_qmp_pciephy_cfg = {
.pcs_misc = sdx65_qmp_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2608,6 +2888,8 @@ static const struct qmp_phy_cfg sdx65_qmp_pciephy_cfg = {
static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v5,
+
.tbls = {
.serdes = sm8450_qmp_gen3_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_serdes_tbl),
@@ -2628,8 +2910,6 @@ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
.rx_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rc_rx_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2643,6 +2923,8 @@ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
.lanes = 2,
+ .offsets = &qmp_pcie_offsets_v5_20,
+
.tbls = {
.serdes = sm8450_qmp_gen4x2_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
@@ -2670,8 +2952,6 @@ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
.pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2699,8 +2979,6 @@ static const struct qmp_phy_cfg sm8550_qmp_gen3x2_pciephy_cfg = {
.pcs_misc = sm8550_qmp_gen3x2_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_pcs_misc_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2730,8 +3008,6 @@ static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.ln_shrd = sm8550_qmp_gen4x2_pcie_ln_shrd_tbl,
.ln_shrd_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_ln_shrd_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = sm8550_qmp_phy_vreg_l,
@@ -2743,6 +3019,74 @@ static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.has_nocsr_reset = true,
};
+static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = {
+ .lanes = 2,
+ .offsets = &qmp_pcie_offsets_v5_20,
+
+ .tbls = {
+ .serdes = sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl),
+ .tx = sa8775p_qmp_gen4_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl),
+ .rx = sa8775p_qmp_gen4x2_pcie_rx_alt_tbl,
+ .rx_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_rx_alt_tbl),
+ .pcs = sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl,
+ .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
+ },
+
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl),
+ },
+
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+};
+
+static const struct qmp_phy_cfg sa8775p_qmp_gen4x4_pciephy_cfg = {
+ .lanes = 4,
+ .offsets = &qmp_pcie_offsets_v5_30,
+
+ .tbls = {
+ .serdes = sa8775p_qmp_gen4x4_pcie_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_serdes_alt_tbl),
+ .tx = sa8775p_qmp_gen4_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl),
+ .rx = sa8775p_qmp_gen4x4_pcie_rx_alt_tbl,
+ .rx_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_rx_alt_tbl),
+ .pcs = sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl,
+ .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
+ },
+
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sa8775p_qmp_gen4x4_pcie_rc_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_rc_serdes_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl),
+ },
+
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+};
+
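
For orientation only: the large sa8775p tables added above are lists of (offset, value) pairs that qmp_pcie_configure_lane()/qmp_pcie_configure() write to the PHY register block during initialization. A simplified sketch of that loop is shown below; the real helper additionally honors a per-lane mask, and the names here (my_init_entry, my_program_init_tbl) are illustrative, not the driver's.

#include <linux/io.h>

struct my_init_entry {
	unsigned int offset;
	unsigned int val;
};

/* Program a table of register writes against a PHY register block. */
static void my_program_init_tbl(void __iomem *base,
				const struct my_init_entry tbl[], int num)
{
	int i;

	for (i = 0; i < num; i++)
		writel(tbl[i].val, base + tbl[i].offset);
}
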
static void qmp_pcie_configure_lane(void __iomem *base,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -2855,7 +3199,7 @@ static int qmp_pcie_init(struct phy *phy)
goto err_assert_reset;
}
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(qmp_pciephy_clk_l), qmp->clks);
if (ret)
goto err_assert_reset;
@@ -2876,7 +3220,7 @@ static int qmp_pcie_exit(struct phy *phy)
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qmp_pciephy_clk_l), qmp->clks);
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
@@ -3059,9 +3403,8 @@ static int qmp_pcie_reset_init(struct qmp_pcie *qmp)
static int qmp_pcie_clk_init(struct qmp_pcie *qmp)
{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
struct device *dev = qmp->dev;
- int num = cfg->num_clks;
+ int num = ARRAY_SIZE(qmp_pciephy_clk_l);
int i;
qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
@@ -3069,9 +3412,9 @@ static int qmp_pcie_clk_init(struct qmp_pcie *qmp)
return -ENOMEM;
for (i = 0; i < num; i++)
- qmp->clks[i].id = cfg->clk_list[i];
+ qmp->clks[i].id = qmp_pciephy_clk_l[i];
- return devm_clk_bulk_get(dev, num, qmp->clks);
+ return devm_clk_bulk_get_optional(dev, num, qmp->clks);
}
static void phy_clk_release_provider(void *res)
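
Illustrative sketch of the "optional superset" clock handling introduced above: instead of a per-SoC clk_list, one fixed name list (mirroring qmp_pciephy_clk_l) is requested with devm_clk_bulk_get_optional(), and clocks a given SoC does not provide stay NULL, which the bulk prepare/enable helpers silently skip. The function name my_phy_clk_init is a placeholder, not the driver's.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static const char * const my_clk_names[] = {
	"aux", "cfg_ahb", "ref", "refgen", "rchng", "phy_aux",
};

static int my_phy_clk_init(struct device *dev, struct clk_bulk_data **out)
{
	int num = ARRAY_SIZE(my_clk_names);
	struct clk_bulk_data *clks;
	int i;

	clks = devm_kcalloc(dev, num, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	for (i = 0; i < num; i++)
		clks[i].id = my_clk_names[i];

	*out = clks;

	/* Clocks missing on this SoC are left NULL and ignored by the bulk APIs. */
	return devm_clk_bulk_get_optional(dev, num, clks);
}
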
@@ -3378,6 +3721,12 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,msm8998-qmp-pcie-phy",
.data = &msm8998_pciephy_cfg,
}, {
+ .compatible = "qcom,sa8775p-qmp-gen4x2-pcie-phy",
+ .data = &sa8775p_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sa8775p-qmp-gen4x4-pcie-phy",
+ .data = &sa8775p_qmp_gen4x4_pciephy_cfg,
+ }, {
.compatible = "qcom,sc8180x-qmp-pcie-phy",
.data = &sc8180x_pciephy_cfg,
}, {
@@ -3402,6 +3751,12 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,sdx65-qmp-gen4x2-pcie-phy",
.data = &sdx65_qmp_pciephy_cfg,
}, {
+ .compatible = "qcom,sm8150-qmp-gen3x1-pcie-phy",
+ .data = &sm8250_qmp_gen3x1_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-gen3x2-pcie-phy",
+ .data = &sm8250_qmp_gen3x2_pciephy_cfg,
+ }, {
.compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy",
.data = &sm8250_qmp_gen3x1_pciephy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
index a3a056741fc7..cdf8c04ea078 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_PCS_PCIE_V5_20_H_
/* Only for QMP V5_20 PHY - PCIe PCS registers */
+#define QPHY_V5_20_PCS_PCIE_POWER_STATE_CONFIG2 0x00c
#define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c
#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x084
#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
index c7b12c1fb7f5..cf91154eed13 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
@@ -19,6 +19,7 @@
/* Only for QMP V5_20 PHY - RX registers */
#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2 0x008
#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3 0x00c
+#define QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3 0x01c
#define QSERDES_V5_20_RX_UCDR_PI_CONTROLS 0x020
#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1 0x02c
#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3 0x030
@@ -80,5 +81,6 @@
#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3 0x210
#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3 0x218
#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3 0x220
+#define QSERDES_V5_20_RX_Q_PI_INTRINSIC_BIAS_RATE32 0x238
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
index a69233e68f9a..8883e1de730e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
@@ -7,6 +7,8 @@
#define QCOM_PHY_QMP_QSERDES_TXRX_USB_V6_H_
#define QSERDES_V6_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V6_TX_TX_EMP_POST1_LVL 0x0c
+#define QSERDES_V6_TX_TX_DRV_LVL 0x14
#define QSERDES_V6_TX_RESET_TSYNC_EN 0x1c
#define QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN 0x20
#define QSERDES_V6_TX_TX_BAND 0x24
@@ -15,6 +17,9 @@
#define QSERDES_V6_TX_RES_CODE_LANE_RX 0x38
#define QSERDES_V6_TX_RES_CODE_LANE_OFFSET_TX 0x3c
#define QSERDES_V6_TX_RES_CODE_LANE_OFFSET_RX 0x40
+#define QSERDES_V6_TX_TRANSCEIVER_BIAS_EN 0x54
+#define QSERDES_V6_TX_HIGHZ_DRVR_EN 0x58
+#define QSERDES_V6_TX_TX_POL_INV 0x5c
#define QSERDES_V6_TX_PARRATE_REC_DETECT_IDLE_EN 0x60
#define QSERDES_V6_TX_BIST_PATTERN7 0x7c
#define QSERDES_V6_TX_LANE_MODE_1 0x84
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 8c877b668bb9..3927eba8e468 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -833,6 +832,8 @@ static const struct qmp_ufs_offsets qmp_ufs_offsets_v6 = {
static const struct qmp_phy_cfg msm8996_ufsphy_cfg = {
.lanes = 1,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = msm8996_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(msm8996_ufsphy_serdes),
@@ -924,6 +925,8 @@ static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = {
static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sdm845_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_serdes),
@@ -1006,6 +1009,8 @@ static const struct qmp_phy_cfg sm7150_ufsphy_cfg = {
static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sm8150_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sm8150_ufsphy_serdes),
@@ -1038,6 +1043,8 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sm8150_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sm8150_ufsphy_serdes),
@@ -1070,6 +1077,8 @@ static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sm8350_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sm8350_ufsphy_serdes),
@@ -1102,6 +1111,8 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sm8350_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sm8350_ufsphy_serdes),
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
new file mode 100644
index 000000000000..cf466f6df94d
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
@@ -0,0 +1,1407 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "phy-qcom-qmp.h"
+#include "phy-qcom-qmp-pcs-misc-v3.h"
+#include "phy-qcom-qmp-pcs-usb-v4.h"
+#include "phy-qcom-qmp-pcs-usb-v5.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+ * mask of lanes for which this register is written
+ * for cases when second lane needs different values
+ */
+ u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* PCS registers */
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
+};
+
+static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V4_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V4_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V4_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V4_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+};
+
+static const unsigned int qmp_v5_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V5_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V5_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V5_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_5, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbb),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3d, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3c, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
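+/* Register sub-block offsets within a single combined PHY register region */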
+struct qmp_usb_legacy_offsets {
+ u16 serdes;
+ u16 pcs;
+ u16 pcs_usb;
+ u16 tx;
+ u16 rx;
+};
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ int lanes;
+
+ const struct qmp_usb_legacy_offsets *offsets;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_usb_tbl;
+ int pcs_usb_tbl_num;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ /* Offset from PCS to PCS_USB region */
+ unsigned int pcs_usb_offset;
+};
+
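+/* Per-PHY driver state: register mappings, clocks, resets and regulators */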
+struct qmp_usb {
+ struct device *dev;
+
+ const struct qmp_phy_cfg *cfg;
+
+ void __iomem *serdes;
+ void __iomem *pcs;
+ void __iomem *pcs_misc;
+ void __iomem *pcs_usb;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *tx2;
+ void __iomem *rx2;
+
+ void __iomem *dp_com;
+
+ struct clk *pipe_clk;
+ struct clk_bulk_data *clks;
+ struct reset_control_bulk_data *resets;
+ struct regulator_bulk_data *vregs;
+
+ enum phy_mode mode;
+
+ struct phy *phy;
+
+ struct clk_fixed_rate pipe_clk_fixed;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write has gone through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write has gone through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const qmp_v3_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+static const char * const qmp_v4_ref_phy_clk_l[] = {
+ "aux", "ref_clk_src", "ref", "com_aux",
+};
+
+/* the primary usb3 phy on sm8250 doesn't have a ref clock */
+static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
+ "aux", "ref_clk_src", "com_aux"
+};
+
+/* list of resets */
+static const char * const msm8996_usb3phy_reset_l[] = {
+ "phy", "common",
+};
+
+static const char * const sc7180_usb3phy_reset_l[] = {
+ "phy",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
+static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = sc7180_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
+static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8150_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl),
+ .rx_tbl = sm8150_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl),
+ .pcs_tbl = sm8150_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_ref_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8250_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
+ .rx_tbl = sm8250_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
+ .pcs_tbl = sm8250_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8250_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+};
+
+static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8350_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_tx_tbl),
+ .rx_tbl = sm8350_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_rx_tbl),
+ .pcs_tbl = sm8350_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8350_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v5_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+};
+
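+/* Program an init table, writing only entries whose lane_mask matches */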
+static void qmp_usb_legacy_configure_lane(void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
+ writel(t->val, base + t->offset);
+ }
+}
+
+static void qmp_usb_legacy_configure(void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qmp_usb_legacy_configure_lane(base, tbl, num, 0xff);
+}
+
+static int qmp_usb_legacy_serdes_init(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+ int serdes_tbl_num = cfg->serdes_tbl_num;
+
+ qmp_usb_legacy_configure(serdes, serdes_tbl, serdes_tbl_num);
+
+ return 0;
+}
+
+static void qmp_usb_legacy_init_dp_com(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ void __iomem *dp_com = qmp->dp_com;
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
+ SW_PWRDN);
+ /* override hardware control for reset of qmp phy */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ /* Default Type-C orientation, i.e. CC1 */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
+ USB3_MODE | DP_MODE);
+
+ /* bring the PCS blocks of both the QMP USB and QMP DP PHYs out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+}
+
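+/*
+ * Enable supplies and clocks, cycle the PHY resets and power up the
+ * DP_COM and PCS blocks.
+ */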
+static int qmp_usb_legacy_init(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qmp->pcs;
+ int ret;
+
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset assert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset deassert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ goto err_assert_reset;
+
+ qmp_usb_legacy_init_dp_com(phy);
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN);
+
+ return 0;
+
+err_assert_reset:
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+err_disable_regulators:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return ret;
+}
+
+static int qmp_usb_legacy_exit(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return 0;
+}
+
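+/*
+ * Program the serdes, tx/rx and pcs init tables, start the SerDes and
+ * PCS, and poll until the PHY reports ready.
+ */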
+static int qmp_usb_legacy_power_on(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *tx = qmp->tx;
+ void __iomem *rx = qmp->rx;
+ void __iomem *pcs = qmp->pcs;
+ void __iomem *status;
+ unsigned int val;
+ int ret;
+
+ qmp_usb_legacy_serdes_init(qmp);
+
+ ret = clk_prepare_enable(qmp->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ return ret;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qmp_usb_legacy_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_usb_legacy_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+
+ if (cfg->lanes >= 2) {
+ qmp_usb_legacy_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_usb_legacy_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ }
+
+ qmp_usb_legacy_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ usleep_range(10, 20);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START);
+
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 200,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_disable_pipe_clk;
+ }
+
+ return 0;
+
+err_disable_pipe_clk:
+ clk_disable_unprepare(qmp->pipe_clk);
+
+ return ret;
+}
+
+static int qmp_usb_legacy_power_off(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ clk_disable_unprepare(qmp->pipe_clk);
+
+ /* PHY reset */
+ qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL],
+ SERDES_START | PCS_START);
+
+ /* Put PHY into POWER DOWN state: active low */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ return 0;
+}
+
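+/* Combined init + power-on sequence used as the phy_ops .init callback */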
+static int qmp_usb_legacy_enable(struct phy *phy)
+{
+ int ret;
+
+ ret = qmp_usb_legacy_init(phy);
+ if (ret)
+ return ret;
+
+ ret = qmp_usb_legacy_power_on(phy);
+ if (ret)
+ qmp_usb_legacy_exit(phy);
+
+ return ret;
+}
+
+static int qmp_usb_legacy_disable(struct phy *phy)
+{
+ int ret;
+
+ ret = qmp_usb_legacy_power_off(phy);
+ if (ret)
+ return ret;
+ return qmp_usb_legacy_exit(phy);
+}
+
+static int qmp_usb_legacy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+
+ qmp->mode = mode;
+
+ return 0;
+}
+
+static const struct phy_ops qmp_usb_legacy_phy_ops = {
+ .init = qmp_usb_legacy_enable,
+ .exit = qmp_usb_legacy_disable,
+ .set_mode = qmp_usb_legacy_set_mode,
+ .owner = THIS_MODULE,
+};
+
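+/*
+ * Arm the receiver/LFPS detect wakeup interrupts and enable i/o clamp_n
+ * so the PHY can detect activity while the link is suspended.
+ */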
+static void qmp_usb_legacy_enable_autonomous_mode(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs_usb = qmp->pcs_usb ?: qmp->pcs;
+ void __iomem *pcs_misc = qmp->pcs_misc;
+ u32 intr_mask;
+
+ if (qmp->mode == PHY_MODE_USB_HOST_SS ||
+ qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+ /* Clear any pending interrupts status */
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (pcs_misc)
+ qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+}
+
+static void qmp_usb_legacy_disable_autonomous_mode(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs_usb = qmp->pcs_usb ?: qmp->pcs;
+ void __iomem *pcs_misc = qmp->pcs_misc;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (pcs_misc)
+ qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
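+/* Runtime PM: enter autonomous (wakeup) mode and gate the PHY clocks */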
+static int __maybe_unused qmp_usb_legacy_runtime_suspend(struct device *dev)
+{
+ struct qmp_usb *qmp = dev_get_drvdata(dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+
+ if (!qmp->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qmp_usb_legacy_enable_autonomous_mode(qmp);
+
+ clk_disable_unprepare(qmp->pipe_clk);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qmp_usb_legacy_runtime_resume(struct device *dev)
+{
+ struct qmp_usb *qmp = dev_get_drvdata(dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+
+ if (!qmp->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(qmp->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qmp_usb_legacy_disable_autonomous_mode(qmp);
+
+ return 0;
+}
+
+static const struct dev_pm_ops qmp_usb_legacy_pm_ops = {
+ SET_RUNTIME_PM_OPS(qmp_usb_legacy_runtime_suspend,
+ qmp_usb_legacy_runtime_resume, NULL)
+};
+
+static int qmp_usb_legacy_vreg_init(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ int num = cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qmp_usb_legacy_reset_init(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ int i;
+ int ret;
+
+ qmp->resets = devm_kcalloc(dev, cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->num_resets; i++)
+ qmp->resets[i].id = cfg->reset_list[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get resets\n");
+
+ return 0;
+}
+
+static int qmp_usb_legacy_clk_init(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ int num = cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which
+ * gate-controls it. The <s>_pipe_clk coming out of the GCC is requested
+ * by the PHY driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care
+ * of assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qmp_usb *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed = &qmp->pipe_clk_fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+ return ret;
+ }
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP PHYs use a 125 MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
+
+static void __iomem *qmp_usb_legacy_iomap(struct device *dev, struct device_node *np,
+ int index, bool exclusive)
+{
+ struct resource res;
+
+ if (!exclusive) {
+ if (of_address_to_resource(np, index, &res))
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ return devm_ioremap(dev, res.start, resource_size(&res));
+ }
+
+ return devm_of_iomap(dev, np, index, NULL);
+}
+
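+/*
+ * Legacy binding: serdes and dp_com regions come from the parent node,
+ * per-lane and pcs regions from the child node.
+ */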
+static int qmp_usb_legacy_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np)
+{
+ struct platform_device *pdev = to_platform_device(qmp->dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ bool exclusive = true;
+
+ qmp->serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qmp->serdes))
+ return PTR_ERR(qmp->serdes);
+
+ qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(qmp->dp_com))
+ return PTR_ERR(qmp->dp_com);
+
+ /*
+ * Get memory resources for the PHY:
+ * Resources are indexed as: tx -> 0, rx -> 1, pcs -> 2.
+ * For dual-lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5.
+ * For single-lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qmp->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qmp->tx))
+ return PTR_ERR(qmp->tx);
+
+ qmp->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qmp->rx))
+ return PTR_ERR(qmp->rx);
+
+ qmp->pcs = qmp_usb_legacy_iomap(dev, np, 2, exclusive);
+ if (IS_ERR(qmp->pcs))
+ return PTR_ERR(qmp->pcs);
+
+ if (cfg->pcs_usb_offset)
+ qmp->pcs_usb = qmp->pcs + cfg->pcs_usb_offset;
+
+ if (cfg->lanes >= 2) {
+ qmp->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qmp->tx2))
+ return PTR_ERR(qmp->tx2);
+
+ qmp->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qmp->rx2))
+ return PTR_ERR(qmp->rx2);
+
+ qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
+ } else {
+ qmp->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
+ }
+
+ if (IS_ERR(qmp->pcs_misc)) {
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qmp->pcs_misc = NULL;
+ }
+
+ qmp->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
+ if (IS_ERR(qmp->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
+ "failed to get pipe clock\n");
+ }
+
+ return 0;
+}
+
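+/*
+ * Non-legacy binding: a single register region split up using the
+ * per-SoC offsets table.
+ */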
+static int qmp_usb_legacy_parse_dt(struct qmp_usb *qmp)
+{
+ struct platform_device *pdev = to_platform_device(qmp->dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ const struct qmp_usb_legacy_offsets *offs = cfg->offsets;
+ struct device *dev = qmp->dev;
+ void __iomem *base;
+
+ if (!offs)
+ return -EINVAL;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qmp->serdes = base + offs->serdes;
+ qmp->pcs = base + offs->pcs;
+ qmp->pcs_usb = base + offs->pcs_usb;
+ qmp->tx = base + offs->tx;
+ qmp->rx = base + offs->rx;
+
+ qmp->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(qmp->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
+ "failed to get pipe clock\n");
+ }
+
+ return 0;
+}
+
+static int qmp_usb_legacy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct device_node *np;
+ struct qmp_usb *qmp;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+
+ qmp->cfg = of_device_get_match_data(dev);
+ if (!qmp->cfg)
+ return -EINVAL;
+
+ ret = qmp_usb_legacy_clk_init(qmp);
+ if (ret)
+ return ret;
+
+ ret = qmp_usb_legacy_reset_init(qmp);
+ if (ret)
+ return ret;
+
+ ret = qmp_usb_legacy_vreg_init(qmp);
+ if (ret)
+ return ret;
+
+ /* Check for legacy binding with child node. */
+ np = of_get_next_available_child(dev->of_node, NULL);
+ if (np) {
+ ret = qmp_usb_legacy_parse_dt_legacy(qmp, np);
+ } else {
+ np = of_node_get(dev->of_node);
+ ret = qmp_usb_legacy_parse_dt(qmp);
+ }
+ if (ret)
+ goto err_node_put;
+
+ pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ goto err_node_put;
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ ret = phy_pipe_clk_register(qmp, np);
+ if (ret)
+ goto err_node_put;
+
+ qmp->phy = devm_phy_create(dev, np, &qmp_usb_legacy_phy_ops);
+ if (IS_ERR(qmp->phy)) {
+ ret = PTR_ERR(qmp->phy);
+ dev_err(dev, "failed to create PHY: %d\n", ret);
+ goto err_node_put;
+ }
+
+ phy_set_drvdata(qmp->phy, qmp);
+
+ of_node_put(np);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ of_node_put(np);
+ return ret;
+}
+
+static const struct of_device_id qmp_usb_legacy_of_match_table[] = {
+ {
+ .compatible = "qcom,sc7180-qmp-usb3-phy",
+ .data = &sc7180_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sc8180x-qmp-usb3-phy",
+ .data = &sm8150_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-usb3-phy",
+ .data = &qmp_v3_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-usb3-phy",
+ .data = &sm8150_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-usb3-phy",
+ .data = &sm8250_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8350-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qmp_usb_legacy_of_match_table);
+
+static struct platform_driver qmp_usb_legacy_driver = {
+ .probe = qmp_usb_legacy_probe,
+ .driver = {
+ .name = "qcom-qmp-usb-legacy-phy",
+ .pm = &qmp_usb_legacy_pm_ops,
+ .of_match_table = qmp_usb_legacy_of_match_table,
+ },
+};
+
+module_platform_driver(qmp_usb_legacy_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP legacy USB+DP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 466f0a56c82e..0130bb8e809a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -367,112 +366,6 @@ static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG2, 0x08),
};
-static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
- /* FLL settings */
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
-
- /* Lock Det settings */
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
-};
-
static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
@@ -693,117 +586,6 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
};
-static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
- /* Lock Det settings */
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
-
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_pcs_usb_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
-};
-
static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
@@ -915,78 +697,6 @@ static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_pcs_usb_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
};
-static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_pcs_usb_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
-};
-
static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
@@ -1148,84 +858,6 @@ static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
};
-static const struct qmp_phy_init_tbl sm8350_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_TX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_RX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x35),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_5, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbb),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbb),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3d, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3c, 2),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xd2),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x13),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VTH_CODE, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_pcs_usb_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
-};
-
static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
@@ -1556,9 +1188,6 @@ struct qmp_phy_cfg {
/* true, if PHY needs delay after POWER_DOWN */
bool has_pwrdn_delay;
- /* true, if PHY has a separate DP_COM control block */
- bool has_phy_dp_com_ctrl;
-
/* Offset from PCS to PCS_USB region */
unsigned int pcs_usb_offset;
};
@@ -1577,8 +1206,6 @@ struct qmp_usb {
void __iomem *tx2;
void __iomem *rx2;
- void __iomem *dp_com;
-
struct clk *pipe_clk;
struct clk_bulk_data *clks;
struct reset_control_bulk_data *resets;
@@ -1632,11 +1259,6 @@ static const char * const qmp_v4_ref_phy_clk_l[] = {
"aux", "ref_clk_src", "ref", "com_aux",
};
-/* the primary usb3 phy on sm8250 doesn't have a ref clock */
-static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
- "aux", "ref_clk_src", "com_aux"
-};
-
/* usb3 phy on sdx55 doesn't have com_aux clock */
static const char * const qmp_v4_sdx55_usbphy_clk_l[] = {
"aux", "cfg_ahb", "ref"
@@ -1651,10 +1273,6 @@ static const char * const msm8996_usb3phy_reset_l[] = {
"phy", "common",
};
-static const char * const sc7180_usb3phy_reset_l[] = {
- "phy",
-};
-
static const char * const qcm2290_usb3phy_reset_l[] = {
"phy_phy", "phy",
};
@@ -1752,29 +1370,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.regs = qmp_v2_usb3phy_regs_layout,
};
-static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = qmp_v3_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
- .tx_tbl = qmp_v3_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
- .rx_tbl = qmp_v3_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
- .pcs_tbl = qmp_v3_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
.lanes = 1,
@@ -1797,29 +1392,6 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
.regs = qmp_v5_usb3phy_regs_layout,
};
-static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = qmp_v3_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
- .tx_tbl = qmp_v3_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
- .rx_tbl = qmp_v3_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
- .pcs_tbl = qmp_v3_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = sc7180_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
.lanes = 1,
@@ -1884,32 +1456,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.regs = qmp_v3_usb3phy_regs_layout,
};
-static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = sm8150_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
- .tx_tbl = sm8150_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl),
- .rx_tbl = sm8150_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl),
- .pcs_tbl = sm8150_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl),
- .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl,
- .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl),
- .clk_list = qmp_v4_ref_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
- .pcs_usb_offset = 0x300,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
.lanes = 1,
@@ -1935,32 +1481,6 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
.has_pwrdn_delay = true,
};
-static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = sm8150_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
- .tx_tbl = sm8250_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
- .rx_tbl = sm8250_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
- .pcs_tbl = sm8250_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
- .pcs_usb_tbl = sm8250_usb3_pcs_usb_tbl,
- .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_usb_tbl),
- .clk_list = qmp_v4_sm8250_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
- .pcs_usb_offset = 0x300,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
.lanes = 1,
@@ -2036,32 +1556,6 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
.has_pwrdn_delay = true,
};
-static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = sm8150_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
- .tx_tbl = sm8350_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_tx_tbl),
- .rx_tbl = sm8350_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_rx_tbl),
- .pcs_tbl = sm8350_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_tbl),
- .pcs_usb_tbl = sm8350_usb3_pcs_usb_tbl,
- .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_usb_tbl),
- .clk_list = qmp_v4_sm8250_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v5_usb3phy_regs_layout,
- .pcs_usb_offset = 0x300,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.lanes = 1,
@@ -2152,7 +1646,6 @@ static int qmp_usb_init(struct phy *phy)
struct qmp_usb *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *pcs = qmp->pcs;
- void __iomem *dp_com = qmp->dp_com;
int ret;
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
@@ -2177,29 +1670,6 @@ static int qmp_usb_init(struct phy *phy)
if (ret)
goto err_assert_reset;
- if (cfg->has_phy_dp_com_ctrl) {
- qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
- SW_PWRDN);
- /* override hardware control for reset of qmp phy */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
-
- /* Default type-c orientation, i.e CC1 */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
-
- qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
- USB3_MODE | DP_MODE);
-
- /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
-
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
- }
-
qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN);
return 0;
@@ -2582,12 +2052,6 @@ static int qmp_usb_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np)
if (IS_ERR(qmp->serdes))
return PTR_ERR(qmp->serdes);
- if (cfg->has_phy_dp_com_ctrl) {
- qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(qmp->dp_com))
- return PTR_ERR(qmp->dp_com);
- }
-
/*
* FIXME: These bindings should be fixed to not rely on overlapping
* mappings for PCS.
@@ -2780,18 +2244,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,sa8775p-qmp-usb3-uni-phy",
.data = &sa8775p_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sc7180-qmp-usb3-phy",
- .data = &sc7180_usb3phy_cfg,
- }, {
- .compatible = "qcom,sc8180x-qmp-usb3-phy",
- .data = &sm8150_usb3phy_cfg,
- }, {
.compatible = "qcom,sc8280xp-qmp-usb3-uni-phy",
.data = &sc8280xp_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sdm845-qmp-usb3-phy",
- .data = &qmp_v3_usb3phy_cfg,
- }, {
.compatible = "qcom,sdm845-qmp-usb3-uni-phy",
.data = &qmp_v3_usb3_uniphy_cfg,
}, {
@@ -2804,26 +2259,14 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,sm6115-qmp-usb3-phy",
.data = &qcm2290_usb3phy_cfg,
}, {
- .compatible = "qcom,sm8150-qmp-usb3-phy",
- .data = &sm8150_usb3phy_cfg,
- }, {
.compatible = "qcom,sm8150-qmp-usb3-uni-phy",
.data = &sm8150_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sm8250-qmp-usb3-phy",
- .data = &sm8250_usb3phy_cfg,
- }, {
.compatible = "qcom,sm8250-qmp-usb3-uni-phy",
.data = &sm8250_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sm8350-qmp-usb3-phy",
- .data = &sm8350_usb3phy_cfg,
- }, {
.compatible = "qcom,sm8350-qmp-usb3-uni-phy",
.data = &sm8350_usb3_uniphy_cfg,
- }, {
- .compatible = "qcom,sm8450-qmp-usb3-phy",
- .data = &sm8350_usb3phy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 7ee4b0e07d11..32d897684755 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -134,6 +134,8 @@
#define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10
#define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14
+#define QSERDES_V5_DP_PHY_STATUS 0x0dc
+
/* Only for QMP V6 PHY - DP PHY registers */
#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
#define QSERDES_V6_DP_PHY_STATUS 0x0e4
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index bec6e40d5280..c52655a383ce 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c b/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
index eeaa1eb0e24b..1484691a41d5 100644
--- a/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
index d0319bee01c0..eb0b0f61d98e 100644
--- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index 53e46c220a3a..98a18987f1be 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -7,7 +7,7 @@
#include <linux/ulpi/regs.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
#include <linux/extcon.h>
diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
index 85888ab2d307..2f876f158e1d 100644
--- a/drivers/phy/ralink/phy-mt7621-pci.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -9,8 +9,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index c375a4676a3d..507435af2656 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -16,7 +16,6 @@
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
-#include <linux/of_device.h>
#define USBHS_LPSTS 0x02
#define USBHS_UGCTRL 0x80
diff --git a/drivers/phy/renesas/phy-rcar-gen3-pcie.c b/drivers/phy/renesas/phy-rcar-gen3-pcie.c
index 9cf786a7daac..0ce7e9c94444 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-pcie.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-pcie.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index d4e2ee7e4efb..e53eace7c91e 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -15,8 +15,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c
index 55b7bdfc10d3..683b19bc411a 100644
--- a/drivers/phy/renesas/r8a779f0-ether-serdes.c
+++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -339,22 +340,15 @@ static int r8a779f0_eth_serdes_probe(struct platform_device *pdev)
{
struct r8a779f0_eth_serdes_drv_data *dd;
struct phy_provider *provider;
- struct resource *res;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
-
dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
platform_set_drvdata(pdev, dd);
dd->pdev = pdev;
- dd->addr = devm_ioremap_resource(&pdev->dev, res);
+ dd->addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dd->addr))
return PTR_ERR(dd->addr);
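
Note: a minimal sketch of the probe pattern this hunk converts to, with illustrative names that are not part of the patch. devm_platform_ioremap_resource() looks up the IORESOURCE_MEM entry and ioremaps it in one device-managed call, replacing the platform_get_resource() plus devm_ioremap_resource() pair.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Look up MEM resource 0 and ioremap it; unmapped automatically on driver detach */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
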
diff --git a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
index 639452f47869..e6a768bbb9b3 100644
--- a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
+++ b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
@@ -21,7 +21,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index 401b0aabb159..6405943a2676 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -770,6 +770,9 @@ static const struct of_device_id inno_dsidphy_of_match[] = {
}, {
.compatible = "rockchip,rk3568-dsi-dphy",
.data = &max_2_5ghz_video_phy_plat_data,
+ }, {
+ .compatible = "rockchip,rv1126-dsi-dphy",
+ .data = &max_2_5ghz_video_phy_plat_data,
},
{}
};
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index 1e1563f5fffc..053bd62e31ba 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/phy/phy.h>
@@ -245,6 +244,7 @@ struct inno_hdmi_phy {
struct clk_hw hw;
struct clk *phyclk;
unsigned long pixclock;
+ unsigned long tmdsclock;
};
struct pre_pll_config {
@@ -291,31 +291,179 @@ struct inno_hdmi_phy_drv_data {
};
static const struct pre_pll_config pre_pll_cfg_table[] = {
- { 27000000, 27000000, 1, 90, 3, 2, 2, 10, 3, 3, 4, 0, 0},
- { 27000000, 33750000, 1, 90, 1, 3, 3, 10, 3, 3, 4, 0, 0},
- { 40000000, 40000000, 1, 80, 2, 2, 2, 12, 2, 2, 2, 0, 0},
- { 59341000, 59341000, 1, 98, 3, 1, 2, 1, 3, 3, 4, 0, 0xE6AE6B},
- { 59400000, 59400000, 1, 99, 3, 1, 1, 1, 3, 3, 4, 0, 0},
- { 59341000, 74176250, 1, 98, 0, 3, 3, 1, 3, 3, 4, 0, 0xE6AE6B},
- { 59400000, 74250000, 1, 99, 1, 2, 2, 1, 3, 3, 4, 0, 0},
- { 74176000, 74176000, 1, 98, 1, 2, 2, 1, 2, 3, 4, 0, 0xE6AE6B},
- { 74250000, 74250000, 1, 99, 1, 2, 2, 1, 2, 3, 4, 0, 0},
- { 74176000, 92720000, 4, 494, 1, 2, 2, 1, 3, 3, 4, 0, 0x816817},
- { 74250000, 92812500, 4, 495, 1, 2, 2, 1, 3, 3, 4, 0, 0},
- {148352000, 148352000, 1, 98, 1, 1, 1, 1, 2, 2, 2, 0, 0xE6AE6B},
- {148500000, 148500000, 1, 99, 1, 1, 1, 1, 2, 2, 2, 0, 0},
- {148352000, 185440000, 4, 494, 0, 2, 2, 1, 3, 2, 2, 0, 0x816817},
- {148500000, 185625000, 4, 495, 0, 2, 2, 1, 3, 2, 2, 0, 0},
- {296703000, 296703000, 1, 98, 0, 1, 1, 1, 0, 2, 2, 0, 0xE6AE6B},
- {297000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 2, 0, 0},
- {296703000, 370878750, 4, 494, 1, 2, 0, 1, 3, 1, 1, 0, 0x816817},
- {297000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 0, 0},
- {593407000, 296703500, 1, 98, 0, 1, 1, 1, 0, 2, 1, 0, 0xE6AE6B},
- {594000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 1, 0, 0},
- {593407000, 370879375, 4, 494, 1, 2, 0, 1, 3, 1, 1, 1, 0x816817},
- {594000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 1, 0},
- {593407000, 593407000, 1, 98, 0, 2, 0, 1, 0, 1, 1, 0, 0xE6AE6B},
- {594000000, 594000000, 1, 99, 0, 2, 0, 1, 0, 1, 1, 0, 0},
+ { 25175000, 25175000, 3, 125, 3, 1, 1, 1, 3, 3, 4, 0, 0xe00000},
+ { 25175000, 31468750, 1, 41, 0, 3, 3, 1, 3, 3, 4, 0, 0xf5554f},
+ { 27000000, 27000000, 1, 36, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 27000000, 33750000, 1, 45, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 31500000, 31500000, 1, 42, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 31500000, 39375000, 1, 105, 1, 3, 3, 10, 0, 3, 4, 0, 0x0},
+ { 33750000, 33750000, 1, 45, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 33750000, 42187500, 1, 169, 2, 3, 3, 15, 0, 3, 4, 0, 0x0},
+ { 35500000, 35500000, 1, 71, 2, 2, 2, 6, 0, 3, 4, 0, 0x0},
+ { 35500000, 44375000, 1, 74, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 36000000, 36000000, 1, 36, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 36000000, 45000000, 1, 45, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ { 40000000, 40000000, 1, 40, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 40000000, 50000000, 1, 50, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ { 49500000, 49500000, 1, 66, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 49500000, 61875000, 1, 165, 1, 3, 3, 10, 0, 3, 4, 0, 0x0},
+ { 50000000, 50000000, 1, 50, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 50000000, 62500000, 1, 125, 2, 2, 2, 15, 0, 2, 2, 0, 0x0},
+ { 54000000, 54000000, 1, 36, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 54000000, 67500000, 1, 45, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ { 56250000, 56250000, 1, 75, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 56250000, 70312500, 1, 117, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 59341000, 59341000, 1, 118, 2, 2, 2, 6, 0, 3, 4, 0, 0xae978d},
+ { 59341000, 74176250, 2, 148, 2, 1, 1, 15, 0, 1, 1, 0, 0x5a3d70},
+ { 59400000, 59400000, 1, 99, 3, 1, 1, 1, 3, 3, 4, 0, 0x0},
+ { 59400000, 74250000, 1, 99, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 65000000, 65000000, 1, 65, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 65000000, 81250000, 3, 325, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 68250000, 68250000, 1, 91, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 68250000, 85312500, 1, 142, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 71000000, 71000000, 1, 71, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 71000000, 88750000, 3, 355, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 72000000, 72000000, 1, 36, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ { 72000000, 90000000, 1, 60, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ { 73250000, 73250000, 3, 293, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 73250000, 91562500, 1, 61, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ { 74176000, 74176000, 1, 37, 2, 0, 0, 1, 1, 2, 2, 0, 0x16872b},
+ { 74176000, 92720000, 2, 185, 2, 1, 1, 15, 0, 1, 1, 0, 0x70a3d7},
+ { 74250000, 74250000, 1, 99, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 74250000, 92812500, 4, 495, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 75000000, 75000000, 1, 50, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 75000000, 93750000, 1, 125, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 78750000, 78750000, 1, 105, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 78750000, 98437500, 1, 164, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 79500000, 79500000, 1, 53, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 79500000, 99375000, 1, 199, 2, 2, 2, 15, 0, 2, 2, 0, 0x0},
+ { 83500000, 83500000, 2, 167, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 83500000, 104375000, 1, 104, 2, 1, 1, 15, 0, 1, 1, 0, 0x600000},
+ { 85500000, 85500000, 1, 57, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 85500000, 106875000, 1, 178, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 85750000, 85750000, 3, 343, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 85750000, 107187500, 1, 143, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 88750000, 88750000, 3, 355, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 88750000, 110937500, 1, 110, 2, 1, 1, 15, 0, 1, 1, 0, 0xf00000},
+ { 94500000, 94500000, 1, 63, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 94500000, 118125000, 1, 197, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ {101000000, 101000000, 1, 101, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {101000000, 126250000, 1, 42, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {102250000, 102250000, 4, 409, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {102250000, 127812500, 1, 128, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {106500000, 106500000, 1, 71, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {106500000, 133125000, 1, 133, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {108000000, 108000000, 1, 36, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {108000000, 135000000, 1, 45, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {115500000, 115500000, 1, 77, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {115500000, 144375000, 1, 48, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {117500000, 117500000, 2, 235, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {117500000, 146875000, 1, 49, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {119000000, 119000000, 1, 119, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {119000000, 148750000, 3, 148, 0, 1, 1, 1, 3, 1, 1, 0, 0xc00000},
+ {121750000, 121750000, 4, 487, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {121750000, 152187500, 1, 203, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ {122500000, 122500000, 2, 245, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {122500000, 153125000, 1, 51, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {135000000, 135000000, 1, 45, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {135000000, 168750000, 1, 169, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {136750000, 136750000, 1, 68, 2, 0, 0, 1, 1, 2, 2, 0, 0x600000},
+ {136750000, 170937500, 1, 113, 0, 2, 2, 1, 3, 2, 2, 0, 0xf5554f},
+ {140250000, 140250000, 2, 187, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {140250000, 175312500, 1, 117, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {146250000, 146250000, 2, 195, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {146250000, 182812500, 1, 61, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {148250000, 148250000, 3, 222, 2, 0, 0, 1, 1, 2, 2, 0, 0x600000},
+ {148250000, 185312500, 1, 123, 0, 2, 2, 1, 3, 2, 2, 0, 0x8aaab0},
+ {148352000, 148352000, 2, 148, 2, 0, 0, 1, 1, 2, 2, 0, 0x5a1cac},
+ {148352000, 185440000, 3, 185, 0, 1, 1, 1, 3, 1, 1, 0, 0x70a3d7},
+ {148500000, 148500000, 1, 99, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {148500000, 185625000, 4, 495, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {154000000, 154000000, 1, 77, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {154000000, 192500000, 1, 64, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {156000000, 156000000, 1, 52, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {156000000, 195000000, 1, 65, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {156750000, 156750000, 2, 209, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {156750000, 195937500, 1, 196, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {157000000, 157000000, 2, 157, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {157000000, 196250000, 1, 131, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {157500000, 157500000, 1, 105, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {157500000, 196875000, 1, 197, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {162000000, 162000000, 1, 54, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {162000000, 202500000, 2, 135, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {175500000, 175500000, 1, 117, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {175500000, 219375000, 1, 73, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {179500000, 179500000, 3, 359, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {179500000, 224375000, 1, 75, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {182750000, 182750000, 1, 91, 2, 0, 0, 1, 1, 2, 2, 0, 0x600000},
+ {182750000, 228437500, 1, 152, 0, 2, 2, 1, 3, 2, 2, 0, 0x4aaab0},
+ {182750000, 228437500, 1, 152, 0, 2, 2, 1, 3, 2, 2, 0, 0x4aaab0},
+ {187000000, 187000000, 2, 187, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {187000000, 233750000, 1, 39, 0, 0, 0, 1, 3, 0, 0, 1, 0x0},
+ {187250000, 187250000, 3, 280, 2, 0, 0, 1, 1, 2, 2, 0, 0xe00000},
+ {187250000, 234062500, 1, 156, 0, 2, 2, 1, 3, 2, 2, 0, 0xaaab0},
+ {189000000, 189000000, 1, 63, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {189000000, 236250000, 1, 79, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {193250000, 193250000, 3, 289, 2, 0, 0, 1, 1, 2, 2, 0, 0xe00000},
+ {193250000, 241562500, 1, 161, 0, 2, 2, 1, 3, 2, 2, 0, 0xaaab0},
+ {202500000, 202500000, 2, 135, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {202500000, 253125000, 1, 169, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {204750000, 204750000, 4, 273, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {204750000, 255937500, 1, 171, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {208000000, 208000000, 1, 104, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {208000000, 260000000, 1, 173, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {214750000, 214750000, 1, 107, 2, 0, 0, 1, 1, 2, 2, 0, 0x600000},
+ {214750000, 268437500, 1, 178, 0, 2, 2, 1, 3, 2, 2, 0, 0xf5554f},
+ {218250000, 218250000, 4, 291, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {218250000, 272812500, 1, 91, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {229500000, 229500000, 2, 153, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {229500000, 286875000, 1, 191, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {234000000, 234000000, 1, 39, 0, 0, 0, 1, 0, 1, 1, 0, 0x0},
+ {234000000, 292500000, 1, 195, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {241500000, 241500000, 2, 161, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {241500000, 301875000, 1, 201, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {245250000, 245250000, 4, 327, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {245250000, 306562500, 1, 51, 0, 0, 0, 1, 3, 0, 0, 1, 0x0},
+ {245500000, 245500000, 4, 491, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {245500000, 306875000, 1, 51, 0, 0, 0, 1, 3, 0, 0, 1, 0x0},
+ {261000000, 261000000, 1, 87, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {261000000, 326250000, 1, 109, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {268250000, 268250000, 9, 402, 0, 0, 0, 1, 0, 1, 1, 0, 0x600000},
+ {268250000, 335312500, 1, 111, 0, 1, 1, 1, 3, 1, 1, 0, 0xc5554f},
+ {268500000, 268500000, 2, 179, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {268500000, 335625000, 1, 56, 0, 0, 0, 1, 3, 0, 0, 1, 0x0},
+ {281250000, 281250000, 4, 375, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {281250000, 351562500, 1, 117, 0, 3, 1, 1, 3, 1, 1, 0, 0x0},
+ {288000000, 288000000, 1, 48, 0, 0, 0, 1, 0, 1, 1, 0, 0x0},
+ {288000000, 360000000, 1, 60, 0, 2, 0, 1, 3, 0, 0, 1, 0x0},
+ {296703000, 296703000, 1, 49, 0, 0, 0, 1, 0, 1, 1, 0, 0x7353f7},
+ {296703000, 370878750, 1, 123, 0, 3, 1, 1, 3, 1, 1, 0, 0xa051eb},
+ {297000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {297000000, 371250000, 4, 495, 0, 3, 1, 1, 3, 1, 1, 0, 0x0},
+ {312250000, 312250000, 9, 468, 0, 0, 0, 1, 0, 1, 1, 0, 0x600000},
+ {312250000, 390312500, 1, 130, 0, 3, 1, 1, 3, 1, 1, 0, 0x1aaab0},
+ {317000000, 317000000, 3, 317, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {317000000, 396250000, 1, 66, 0, 2, 0, 1, 3, 0, 0, 1, 0x0},
+ {319750000, 319750000, 3, 159, 0, 0, 0, 1, 0, 1, 1, 0, 0xe00000},
+ {319750000, 399687500, 3, 199, 0, 2, 0, 1, 3, 0, 0, 1, 0xd80000},
+ {333250000, 333250000, 9, 499, 0, 0, 0, 1, 0, 1, 1, 0, 0xe00000},
+ {333250000, 416562500, 1, 138, 0, 3, 1, 1, 3, 1, 1, 0, 0xdaaab0},
+ {348500000, 348500000, 9, 522, 0, 2, 0, 1, 0, 1, 1, 0, 0xc00000},
+ {348500000, 435625000, 1, 145, 0, 3, 1, 1, 3, 1, 1, 0, 0x35554f},
+ {356500000, 356500000, 9, 534, 0, 2, 0, 1, 0, 1, 1, 0, 0xc00000},
+ {356500000, 445625000, 1, 148, 0, 3, 1, 1, 3, 1, 1, 0, 0x8aaab0},
+ {380500000, 380500000, 9, 570, 0, 2, 0, 1, 0, 1, 1, 0, 0xc00000},
+ {380500000, 475625000, 1, 158, 0, 3, 1, 1, 3, 1, 1, 0, 0x8aaab0},
+ {443250000, 443250000, 1, 73, 0, 2, 0, 1, 0, 1, 1, 0, 0xe00000},
+ {443250000, 554062500, 1, 92, 0, 2, 0, 1, 3, 0, 0, 1, 0x580000},
+ {505250000, 505250000, 9, 757, 0, 2, 0, 1, 0, 1, 1, 0, 0xe00000},
+ {552750000, 552750000, 3, 276, 0, 2, 0, 1, 0, 1, 1, 0, 0x600000},
+ {593407000, 296703500, 3, 296, 0, 1, 1, 1, 0, 1, 1, 0, 0xb41893},
+ {593407000, 370879375, 4, 494, 0, 3, 1, 1, 3, 0, 0, 1, 0x817e4a},
+ {593407000, 593407000, 3, 296, 0, 2, 0, 1, 0, 1, 1, 0, 0xb41893},
+ {594000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 1, 1, 0, 0x0},
+ {594000000, 371250000, 4, 495, 0, 3, 1, 1, 3, 0, 0, 1, 0x0},
+ {594000000, 594000000, 1, 99, 0, 2, 0, 1, 0, 1, 1, 0, 0x0},
{ /* sentinel */ }
};
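
Note: the expanded table above holds one row per (pixel clock, TMDS clock) pair and ends with an all-zero sentinel. A hypothetical lookup over such a table could look like the sketch below; the trimmed struct, field names and helper name are illustrative assumptions, not the driver's actual code.

#include <linux/err.h>
#include <linux/errno.h>

/* Illustrative, trimmed-down shape of one table row */
struct example_pre_pll_cfg {
	unsigned long pixclock;
	unsigned long tmdsclock;
	/* divider and fractional fields omitted for brevity */
};

/* Return the first row whose pixel clock matches the requested rate,
 * or an error pointer once the all-zero sentinel row is reached. */
static const struct example_pre_pll_cfg *
example_find_pre_pll_cfg(const struct example_pre_pll_cfg *tbl,
			 unsigned long rate)
{
	for (; tbl->pixclock; tbl++)
		if (tbl->pixclock == rate)
			return tbl;

	return ERR_PTR(-EINVAL);
}
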
@@ -485,6 +633,8 @@ static int inno_hdmi_phy_power_on(struct phy *phy)
dev_dbg(inno->dev, "Inno HDMI PHY Power On\n");
+ inno->plat_data->clk_ops->set_rate(&inno->hw, inno->pixclock, 24000000);
+
ret = clk_prepare_enable(inno->phyclk);
if (ret)
return ret;
@@ -509,6 +659,8 @@ static int inno_hdmi_phy_power_off(struct phy *phy)
clk_disable_unprepare(inno->phyclk);
+ inno->tmdsclock = 0;
+
dev_dbg(inno->dev, "Inno HDMI PHY Power Off\n");
return 0;
@@ -628,6 +780,9 @@ static int inno_hdmi_phy_rk3228_clk_set_rate(struct clk_hw *hw,
dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
__func__, rate, tmdsclock);
+ if (inno->pixclock == rate && inno->tmdsclock == tmdsclock)
+ return 0;
+
cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
if (IS_ERR(cfg))
return PTR_ERR(cfg);
@@ -670,6 +825,7 @@ static int inno_hdmi_phy_rk3228_clk_set_rate(struct clk_hw *hw,
}
inno->pixclock = rate;
+ inno->tmdsclock = tmdsclock;
return 0;
}
@@ -714,7 +870,7 @@ unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
{
struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
unsigned long frac;
- u8 nd, no_a, no_b, no_c, no_d;
+ u8 nd, no_a, no_b, no_d;
u64 vco;
u16 nf;
@@ -737,18 +893,17 @@ unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
no_b = inno_read(inno, 0xa5) & RK3328_PRE_PLL_PCLK_DIV_B_MASK;
no_b >>= RK3328_PRE_PLL_PCLK_DIV_B_SHIFT;
no_b += 2;
- no_c = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_C_MASK;
- no_c >>= RK3328_PRE_PLL_PCLK_DIV_C_SHIFT;
- no_c = 1 << no_c;
no_d = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_D_MASK;
do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
}
- inno->pixclock = vco;
- dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
+ inno->pixclock = DIV_ROUND_CLOSEST((unsigned long)vco, 1000) * 1000;
- return vco;
+ dev_dbg(inno->dev, "%s rate %lu vco %llu\n",
+ __func__, inno->pixclock, vco);
+
+ return inno->pixclock;
}
static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
@@ -782,6 +937,9 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
__func__, rate, tmdsclock);
+ if (inno->pixclock == rate && inno->tmdsclock == tmdsclock)
+ return 0;
+
cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
if (IS_ERR(cfg))
return PTR_ERR(cfg);
@@ -790,8 +948,8 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
RK3328_PRE_PLL_POWER_DOWN);
/* Configure pre-pll */
- inno_update_bits(inno, 0xa0, RK3228_PCLK_VCO_DIV_5_MASK,
- RK3228_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
+ inno_update_bits(inno, 0xa0, RK3328_PCLK_VCO_DIV_5_MASK,
+ RK3328_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
inno_write(inno, 0xa1, RK3328_PRE_PLL_PRE_DIV(cfg->prediv));
val = RK3328_SPREAD_SPECTRUM_MOD_DISABLE;
@@ -821,6 +979,7 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
}
inno->pixclock = rate;
+ inno->tmdsclock = tmdsclock;
return 0;
}
@@ -1021,9 +1180,10 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
inno_write(inno, 0xac, RK3328_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
if (cfg->postdiv == 1) {
- inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS);
inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS |
+ RK3328_POST_PLL_POWER_DOWN);
} else {
v = (cfg->postdiv / 2) - 1;
v &= RK3328_POST_PLL_POST_DIV_MASK;
@@ -1031,7 +1191,8 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
RK3328_POST_PLL_PRE_DIV(cfg->prediv));
inno_write(inno, 0xaa, RK3328_POST_PLL_POST_DIV_ENABLE |
- RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ RK3328_POST_PLL_REFCLK_SEL_TMDS |
+ RK3328_POST_PLL_POWER_DOWN);
}
for (v = 0; v < 14; v++)
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index a0bc10aa7961..b982c3f0d4b5 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>
@@ -32,6 +33,8 @@
#define SCHEDULE_DELAY (60 * HZ)
#define OTG_SCHEDULE_DELAY (2 * HZ)
+struct rockchip_usb2phy;
+
enum rockchip_usb2phy_port_id {
USB2PHY_PORT_OTG,
USB2PHY_PORT_HOST,
@@ -116,6 +119,12 @@ struct rockchip_chg_det_reg {
* @bvalid_det_en: vbus valid rise detection enable register.
* @bvalid_det_st: vbus valid rise detection status register.
* @bvalid_det_clr: vbus valid rise detection clear register.
+ * @disfall_en: host disconnect fall edge detection enable.
+ * @disfall_st: host disconnect fall edge detection state.
+ * @disfall_clr: host disconnect fall edge detection clear.
+ * @disrise_en: host disconnect rise edge detection enable.
+ * @disrise_st: host disconnect rise edge detection state.
+ * @disrise_clr: host disconnect rise edge detection clear.
* @id_det_en: id detection enable register.
* @id_det_st: id detection state register.
* @id_det_clr: id detection clear register.
@@ -133,6 +142,12 @@ struct rockchip_usb2phy_port_cfg {
struct usb2phy_reg bvalid_det_en;
struct usb2phy_reg bvalid_det_st;
struct usb2phy_reg bvalid_det_clr;
+ struct usb2phy_reg disfall_en;
+ struct usb2phy_reg disfall_st;
+ struct usb2phy_reg disfall_clr;
+ struct usb2phy_reg disrise_en;
+ struct usb2phy_reg disrise_st;
+ struct usb2phy_reg disrise_clr;
struct usb2phy_reg id_det_en;
struct usb2phy_reg id_det_st;
struct usb2phy_reg id_det_clr;
@@ -150,6 +165,7 @@ struct rockchip_usb2phy_port_cfg {
* struct rockchip_usb2phy_cfg - usb-phy configuration.
* @reg: the address offset of grf for usb-phy config.
* @num_ports: specify how many ports that the phy has.
+ * @phy_tuning: default PHY parameter tuning.
* @clkout_ctl: keep on/turn off output clk of phy.
* @port_cfgs: usb-phy port configurations.
* @chg_det: charger detection registers.
@@ -157,6 +173,7 @@ struct rockchip_usb2phy_port_cfg {
struct rockchip_usb2phy_cfg {
unsigned int reg;
unsigned int num_ports;
+ int (*phy_tuning)(struct rockchip_usb2phy *rphy);
struct usb2phy_reg clkout_ctl;
const struct rockchip_usb2phy_port_cfg port_cfgs[USB2PHY_NUM_PORTS];
const struct rockchip_chg_det_reg chg_det;
@@ -168,6 +185,7 @@ struct rockchip_usb2phy_cfg {
* @port_id: flag for otg port or host port.
* @suspended: phy suspended flag.
* @vbus_attached: otg device vbus status.
+ * @host_disconnect: usb host disconnect status.
* @bvalid_irq: IRQ number assigned for vbus valid rise detection.
* @id_irq: IRQ number assigned for ID pin detection.
* @ls_irq: IRQ number assigned for linestate detection.
@@ -187,6 +205,7 @@ struct rockchip_usb2phy_port {
unsigned int port_id;
bool suspended;
bool vbus_attached;
+ bool host_disconnect;
int bvalid_irq;
int id_irq;
int ls_irq;
@@ -209,6 +228,7 @@ struct rockchip_usb2phy_port {
* @clk: clock struct of phy input clk.
* @clk480m: clock struct of phy output clk.
* @clk480m_hw: clock struct of phy output clk management.
+ * @phy_reset: phy reset control.
* @chg_state: states involved in USB charger detection.
* @chg_type: USB charger types.
* @dcd_retries: The retry count used to track Data contact
@@ -225,6 +245,7 @@ struct rockchip_usb2phy {
struct clk *clk;
struct clk *clk480m;
struct clk_hw clk480m_hw;
+ struct reset_control *phy_reset;
enum usb_chg_state chg_state;
enum power_supply_type chg_type;
u8 dcd_retries;
@@ -266,6 +287,25 @@ static inline bool property_enabled(struct regmap *base,
return tmp != reg->disable;
}
+static int rockchip_usb2phy_reset(struct rockchip_usb2phy *rphy)
+{
+ int ret;
+
+ ret = reset_control_assert(rphy->phy_reset);
+ if (ret)
+ return ret;
+
+ udelay(10);
+
+ ret = reset_control_deassert(rphy->phy_reset);
+ if (ret)
+ return ret;
+
+ usleep_range(100, 200);
+
+ return 0;
+}
+
static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw)
{
struct rockchip_usb2phy *rphy =
@@ -405,6 +445,27 @@ static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy)
return 0;
}
+static int rockchip_usb2phy_enable_host_disc_irq(struct rockchip_usb2phy *rphy,
+ struct rockchip_usb2phy_port *rport,
+ bool en)
+{
+ int ret;
+
+ ret = property_enable(rphy->grf, &rport->port_cfg->disfall_clr, true);
+ if (ret)
+ return ret;
+
+ ret = property_enable(rphy->grf, &rport->port_cfg->disfall_en, en);
+ if (ret)
+ return ret;
+
+ ret = property_enable(rphy->grf, &rport->port_cfg->disrise_clr, true);
+ if (ret)
+ return ret;
+
+ return property_enable(rphy->grf, &rport->port_cfg->disrise_en, en);
+}
+
static int rockchip_usb2phy_init(struct phy *phy)
{
struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
@@ -449,6 +510,15 @@ static int rockchip_usb2phy_init(struct phy *phy)
dev_dbg(&rport->phy->dev, "mode %d\n", rport->mode);
}
} else if (rport->port_id == USB2PHY_PORT_HOST) {
+ if (rport->port_cfg->disfall_en.offset) {
+ rport->host_disconnect = true;
+ ret = rockchip_usb2phy_enable_host_disc_irq(rphy, rport, true);
+ if (ret) {
+ dev_err(rphy->dev, "failed to enable disconnect irq\n");
+ goto out;
+ }
+ }
+
/* clear linestate and enable linestate detect irq */
ret = property_enable(rphy->grf,
&rport->port_cfg->ls_det_clr, true);
@@ -490,6 +560,18 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
return ret;
}
+ /*
+ * On rk3588 the PHY must be reset when exiting suspend if
+ * common_on_n is 1'b1 (i.e. the REFCLK_LOGIC, Bias and PLL
+ * blocks are powered down for lower power consumption). To
+ * avoid the reset, keep common_on_n at 1'b0 so these blocks
+ * stay powered.
+ */
+ ret = rockchip_usb2phy_reset(rphy);
+ if (ret)
+ return ret;
+
/* waiting for the utmi_clk to become stable */
usleep_range(1500, 2000);
@@ -810,9 +892,7 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
struct rockchip_usb2phy_port *rport =
container_of(work, struct rockchip_usb2phy_port, sm_work.work);
struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
- unsigned int sh = rport->port_cfg->utmi_hstdet.bitend -
- rport->port_cfg->utmi_hstdet.bitstart + 1;
- unsigned int ul, uhd, state;
+ unsigned int sh, ul, uhd, state;
unsigned int ul_mask, uhd_mask;
int ret;
@@ -822,18 +902,26 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
if (ret < 0)
goto next_schedule;
- ret = regmap_read(rphy->grf, rport->port_cfg->utmi_hstdet.offset, &uhd);
- if (ret < 0)
- goto next_schedule;
-
- uhd_mask = GENMASK(rport->port_cfg->utmi_hstdet.bitend,
- rport->port_cfg->utmi_hstdet.bitstart);
ul_mask = GENMASK(rport->port_cfg->utmi_ls.bitend,
rport->port_cfg->utmi_ls.bitstart);
- /* stitch on utmi_ls and utmi_hstdet as phy state */
- state = ((uhd & uhd_mask) >> rport->port_cfg->utmi_hstdet.bitstart) |
- (((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << sh);
+ if (rport->port_cfg->utmi_hstdet.offset) {
+ ret = regmap_read(rphy->grf, rport->port_cfg->utmi_hstdet.offset, &uhd);
+ if (ret < 0)
+ goto next_schedule;
+
+ uhd_mask = GENMASK(rport->port_cfg->utmi_hstdet.bitend,
+ rport->port_cfg->utmi_hstdet.bitstart);
+
+ sh = rport->port_cfg->utmi_hstdet.bitend -
+ rport->port_cfg->utmi_hstdet.bitstart + 1;
+ /* stitch on utmi_ls and utmi_hstdet as phy state */
+ state = ((uhd & uhd_mask) >> rport->port_cfg->utmi_hstdet.bitstart) |
+ (((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << sh);
+ } else {
+ state = ((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << 1 |
+ rport->host_disconnect;
+ }
switch (state) {
case PHY_STATE_HS_ONLINE:
@@ -966,6 +1054,31 @@ static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
return ret;
}
+static irqreturn_t rockchip_usb2phy_host_disc_irq(int irq, void *data)
+{
+ struct rockchip_usb2phy_port *rport = data;
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+
+ if (!property_enabled(rphy->grf, &rport->port_cfg->disfall_st) &&
+ !property_enabled(rphy->grf, &rport->port_cfg->disrise_st))
+ return IRQ_NONE;
+
+ mutex_lock(&rport->mutex);
+
+ /* clear disconnect fall or rise detect irq pending status */
+ if (property_enabled(rphy->grf, &rport->port_cfg->disfall_st)) {
+ property_enable(rphy->grf, &rport->port_cfg->disfall_clr, true);
+ rport->host_disconnect = false;
+ } else if (property_enabled(rphy->grf, &rport->port_cfg->disrise_st)) {
+ property_enable(rphy->grf, &rport->port_cfg->disrise_clr, true);
+ rport->host_disconnect = true;
+ }
+
+ mutex_unlock(&rport->mutex);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
{
struct rockchip_usb2phy *rphy = data;
@@ -978,6 +1091,10 @@ static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
if (!rport->phy)
continue;
+ if (rport->port_id == USB2PHY_PORT_HOST &&
+ rport->port_cfg->disfall_en.offset)
+ ret |= rockchip_usb2phy_host_disc_irq(irq, rport);
+
switch (rport->port_id) {
case USB2PHY_PORT_OTG:
if (rport->mode != USB_DR_MODE_HOST &&
@@ -1188,7 +1305,6 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
struct phy_provider *provider;
struct rockchip_usb2phy *rphy;
const struct rockchip_usb2phy_cfg *phy_cfgs;
- const struct of_device_id *match;
unsigned int reg;
int index, ret;
@@ -1196,12 +1312,6 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
if (!rphy)
return -ENOMEM;
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match || !match->data) {
- dev_err(dev, "phy configs are not assigned!\n");
- return -EINVAL;
- }
-
if (!dev->parent || !dev->parent->of_node) {
rphy->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,usbgrf");
if (IS_ERR(rphy->grf)) {
@@ -1233,7 +1343,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
/* support address_cells=2 */
- if (reg == 0) {
+ if (of_property_count_u32_elems(np, "reg") > 2 && reg == 0) {
if (of_property_read_u32_index(np, "reg", 1, &reg)) {
dev_err(dev, "the reg property is not assigned in %pOFn node\n",
np);
@@ -1242,45 +1352,55 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
rphy->dev = dev;
- phy_cfgs = match->data;
+ phy_cfgs = device_get_match_data(dev);
rphy->chg_state = USB_CHG_STATE_UNDEFINED;
rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
rphy->irq = platform_get_irq_optional(pdev, 0);
platform_set_drvdata(pdev, rphy);
+ if (!phy_cfgs)
+ return dev_err_probe(dev, -EINVAL, "phy configs are not assigned!\n");
+
ret = rockchip_usb2phy_extcon_register(rphy);
if (ret)
return ret;
/* find out a proper config which can be matched with dt. */
index = 0;
- while (phy_cfgs[index].reg) {
+ do {
if (phy_cfgs[index].reg == reg) {
rphy->phy_cfg = &phy_cfgs[index];
break;
}
++index;
- }
+ } while (phy_cfgs[index].reg);
if (!rphy->phy_cfg) {
- dev_err(dev, "no phy-config can be matched with %pOFn node\n",
- np);
+ dev_err(dev, "could not find phy config for reg=0x%08x\n", reg);
return -EINVAL;
}
- rphy->clk = of_clk_get_by_name(np, "phyclk");
- if (!IS_ERR(rphy->clk)) {
- clk_prepare_enable(rphy->clk);
- } else {
- dev_info(&pdev->dev, "no phyclk specified\n");
- rphy->clk = NULL;
+ rphy->phy_reset = devm_reset_control_get_optional(dev, "phy");
+ if (IS_ERR(rphy->phy_reset))
+ return PTR_ERR(rphy->phy_reset);
+
+ rphy->clk = devm_clk_get_optional_enabled(dev, "phyclk");
+ if (IS_ERR(rphy->clk)) {
+ return dev_err_probe(&pdev->dev, PTR_ERR(rphy->clk),
+ "failed to get phyclk\n");
}
ret = rockchip_usb2phy_clk480m_register(rphy);
if (ret) {
dev_err(dev, "failed to register 480m output clock\n");
- goto disable_clks;
+ return ret;
+ }
+
+ if (rphy->phy_cfg->phy_tuning) {
+ ret = rphy->phy_cfg->phy_tuning(rphy);
+ if (ret)
+ return ret;
}
index = 0;
@@ -1343,11 +1463,55 @@ next_child:
put_child:
of_node_put(child_np);
-disable_clks:
- if (rphy->clk) {
- clk_disable_unprepare(rphy->clk);
- clk_put(rphy->clk);
+ return ret;
+}
+
+static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy)
+{
+ int ret;
+ bool usb3otg = false;
+ /*
+ * utmi_termselect = 1'b1 (en FS terminations)
+ * utmi_xcvrselect = 2'b01 (FS transceiver)
+ */
+ int suspend_cfg = 0x14;
+
+ if (rphy->phy_cfg->reg == 0x0000 || rphy->phy_cfg->reg == 0x4000) {
+ /* USB2 config for USB3_0 and USB3_1 */
+ suspend_cfg |= 0x01; /* utmi_opmode = 2'b01 (no-driving) */
+ usb3otg = true;
+ } else if (rphy->phy_cfg->reg == 0x8000 || rphy->phy_cfg->reg == 0xc000) {
+ /* USB2 config for USB2_0 and USB2_1 */
+ suspend_cfg |= 0x00; /* utmi_opmode = 2'b00 (normal) */
+ } else {
+ return -EINVAL;
}
+
+ /* Deassert SIDDQ to power on analog block */
+ ret = regmap_write(rphy->grf, 0x0008, GENMASK(29, 29) | 0x0000);
+ if (ret)
+ return ret;
+
+ /* Reset the PHY after exiting IDDQ mode */
+ ret = rockchip_usb2phy_reset(rphy);
+ if (ret)
+ return ret;
+
+ /* suspend configuration */
+ ret |= regmap_write(rphy->grf, 0x000c, GENMASK(20, 16) | suspend_cfg);
+
+ /* HS DC Voltage Level Adjustment 4'b1001 : +5.89% */
+ ret |= regmap_write(rphy->grf, 0x0004, GENMASK(27, 24) | 0x0900);
+
+ /* HS Transmitter Pre-Emphasis Current Control 2'b10 : 2x */
+ ret |= regmap_write(rphy->grf, 0x0008, GENMASK(20, 19) | 0x0010);
+
+ if (!usb3otg)
+ return ret;
+
+ /* Pullup iddig pin for USB3_0 OTG mode */
+ ret |= regmap_write(rphy->grf, 0x0010, GENMASK(17, 16) | 0x0003);
+
return ret;
}
@@ -1664,6 +1828,126 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
{ /* sentinel */ }
};
+static const struct rockchip_usb2phy_cfg rk3588_phy_cfgs[] = {
+ {
+ .reg = 0x0000,
+ .num_ports = 1,
+ .phy_tuning = rk3588_usb2phy_tuning,
+ .clkout_ctl = { 0x0000, 0, 0, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x000c, 11, 11, 0, 1 },
+ .bvalid_det_en = { 0x0080, 1, 1, 0, 1 },
+ .bvalid_det_st = { 0x0084, 1, 1, 0, 1 },
+ .bvalid_det_clr = { 0x0088, 1, 1, 0, 1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .disfall_en = { 0x0080, 6, 6, 0, 1 },
+ .disfall_st = { 0x0084, 6, 6, 0, 1 },
+ .disfall_clr = { 0x0088, 6, 6, 0, 1 },
+ .disrise_en = { 0x0080, 5, 5, 0, 1 },
+ .disrise_st = { 0x0084, 5, 5, 0, 1 },
+ .disrise_clr = { 0x0088, 5, 5, 0, 1 },
+ .utmi_avalid = { 0x00c0, 7, 7, 0, 1 },
+ .utmi_bvalid = { 0x00c0, 6, 6, 0, 1 },
+ .utmi_ls = { 0x00c0, 10, 9, 0, 1 },
+ }
+ },
+ .chg_det = {
+ .cp_det = { 0x00c0, 0, 0, 0, 1 },
+ .dcp_det = { 0x00c0, 0, 0, 0, 1 },
+ .dp_det = { 0x00c0, 1, 1, 1, 0 },
+ .idm_sink_en = { 0x0008, 5, 5, 1, 0 },
+ .idp_sink_en = { 0x0008, 5, 5, 0, 1 },
+ .idp_src_en = { 0x0008, 14, 14, 0, 1 },
+ .rdm_pdwn_en = { 0x0008, 14, 14, 0, 1 },
+ .vdm_src_en = { 0x0008, 7, 6, 0, 3 },
+ .vdp_src_en = { 0x0008, 7, 6, 0, 3 },
+ },
+ },
+ {
+ .reg = 0x4000,
+ .num_ports = 1,
+ .phy_tuning = rk3588_usb2phy_tuning,
+ .clkout_ctl = { 0x0000, 0, 0, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x000c, 11, 11, 0, 1 },
+ .bvalid_det_en = { 0x0080, 1, 1, 0, 1 },
+ .bvalid_det_st = { 0x0084, 1, 1, 0, 1 },
+ .bvalid_det_clr = { 0x0088, 1, 1, 0, 1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .disfall_en = { 0x0080, 6, 6, 0, 1 },
+ .disfall_st = { 0x0084, 6, 6, 0, 1 },
+ .disfall_clr = { 0x0088, 6, 6, 0, 1 },
+ .disrise_en = { 0x0080, 5, 5, 0, 1 },
+ .disrise_st = { 0x0084, 5, 5, 0, 1 },
+ .disrise_clr = { 0x0088, 5, 5, 0, 1 },
+ .utmi_avalid = { 0x00c0, 7, 7, 0, 1 },
+ .utmi_bvalid = { 0x00c0, 6, 6, 0, 1 },
+ .utmi_ls = { 0x00c0, 10, 9, 0, 1 },
+ }
+ },
+ .chg_det = {
+ .cp_det = { 0x00c0, 0, 0, 0, 1 },
+ .dcp_det = { 0x00c0, 0, 0, 0, 1 },
+ .dp_det = { 0x00c0, 1, 1, 1, 0 },
+ .idm_sink_en = { 0x0008, 5, 5, 1, 0 },
+ .idp_sink_en = { 0x0008, 5, 5, 0, 1 },
+ .idp_src_en = { 0x0008, 14, 14, 0, 1 },
+ .rdm_pdwn_en = { 0x0008, 14, 14, 0, 1 },
+ .vdm_src_en = { 0x0008, 7, 6, 0, 3 },
+ .vdp_src_en = { 0x0008, 7, 6, 0, 3 },
+ },
+ },
+ {
+ .reg = 0x8000,
+ .num_ports = 1,
+ .phy_tuning = rk3588_usb2phy_tuning,
+ .clkout_ctl = { 0x0000, 0, 0, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0008, 2, 2, 0, 1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .disfall_en = { 0x0080, 6, 6, 0, 1 },
+ .disfall_st = { 0x0084, 6, 6, 0, 1 },
+ .disfall_clr = { 0x0088, 6, 6, 0, 1 },
+ .disrise_en = { 0x0080, 5, 5, 0, 1 },
+ .disrise_st = { 0x0084, 5, 5, 0, 1 },
+ .disrise_clr = { 0x0088, 5, 5, 0, 1 },
+ .utmi_ls = { 0x00c0, 10, 9, 0, 1 },
+ }
+ },
+ },
+ {
+ .reg = 0xc000,
+ .num_ports = 1,
+ .phy_tuning = rk3588_usb2phy_tuning,
+ .clkout_ctl = { 0x0000, 0, 0, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0008, 2, 2, 0, 1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .disfall_en = { 0x0080, 6, 6, 0, 1 },
+ .disfall_st = { 0x0084, 6, 6, 0, 1 },
+ .disfall_clr = { 0x0088, 6, 6, 0, 1 },
+ .disrise_en = { 0x0080, 5, 5, 0, 1 },
+ .disrise_st = { 0x0084, 5, 5, 0, 1 },
+ .disrise_clr = { 0x0088, 5, 5, 0, 1 },
+ .utmi_ls = { 0x00c0, 10, 9, 0, 1 },
+ }
+ },
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
{
.reg = 0x100,
@@ -1714,6 +1998,7 @@ static const struct of_device_id rockchip_usb2phy_dt_match[] = {
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
{ .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
{ .compatible = "rockchip,rk3568-usb2phy", .data = &rk3568_phy_cfgs },
+ { .compatible = "rockchip,rk3588-usb2phy", .data = &rk3588_phy_cfgs },
{ .compatible = "rockchip,rv1108-usb2phy", .data = &rv1108_phy_cfgs },
{}
};
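
Note: the usb2phy probe change above drops of_match_device() in favour of device_get_match_data(). A minimal sketch of that pattern, with illustrative compatible strings and names not taken from the patch:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_cfg {
	unsigned int reg;
};

static const struct example_cfg example_cfg_a = { .reg = 0x100 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-phy", .data = &example_cfg_a },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct platform_device *pdev)
{
	const struct example_cfg *cfg;

	/* Returns the .data pointer of the matching of_device_id entry, or NULL */
	cfg = device_get_match_data(&pdev->dev);
	if (!cfg)
		return dev_err_probe(&pdev->dev, -EINVAL,
				     "no matching configuration\n");

	return 0;
}
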
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 7b8b001e4f9e..5de5e2e97ffa 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -8,8 +8,9 @@
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/units.h>
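
Note: the rockchip_usb2phy_reset() helper added in the usb2phy diff above pulses an optional reset line around short delays. A generic sketch of that sequence follows; the device, reset name and delay values are illustrative, mirroring but not copied from the patch.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Pulse an optional reset line; a missing line is treated as a no-op */
static int example_phy_reset(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_optional(dev, "phy");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	udelay(10);

	ret = reset_control_deassert(rst);
	if (ret)
		return ret;

	/* give the PHY time to settle after the reset is released */
	usleep_range(100, 200);
	return 0;
}
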
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
index 1d355b32ba55..121e5961ce11 100644
--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -12,9 +12,10 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 8b1667be4915..4efcb78b0ab1 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1116,8 +1116,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tcphy->base = devm_ioremap_resource(dev, res);
+ tcphy->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(tcphy->base))
return PTR_ERR(tcphy->base);
diff --git a/drivers/phy/samsung/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c
index 6069fedbd8f3..a636dee07585 100644
--- a/drivers/phy/samsung/phy-exynos-dp-video.c
+++ b/drivers/phy/samsung/phy-exynos-dp-video.c
@@ -12,8 +12,6 @@
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index a7f67857e5b2..592d8067e848 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -11,9 +11,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index ee0848fe8432..3f310b28bfff 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -14,8 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -32,6 +30,7 @@
#define EXYNOS5_FSEL_19MHZ2 0x3
#define EXYNOS5_FSEL_20MHZ 0x4
#define EXYNOS5_FSEL_24MHZ 0x5
+#define EXYNOS5_FSEL_26MHZ 0x82
#define EXYNOS5_FSEL_50MHZ 0x7
/* Exynos5: USB 3.0 DRD PHY registers */
@@ -146,6 +145,34 @@
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_62M5 (0x20 << 4)
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_96M_100M (0x40 << 4)
+/* Exynos850: USB DRD PHY registers */
+#define EXYNOS850_DRD_LINKCTRL 0x04
+#define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4)
+#define LINKCTRL_FORCE_QACT BIT(8)
+
+#define EXYNOS850_DRD_CLKRST 0x20
+#define CLKRST_LINK_SW_RST BIT(0)
+#define CLKRST_PORT_RST BIT(1)
+#define CLKRST_PHY_SW_RST BIT(3)
+
+#define EXYNOS850_DRD_UTMI 0x50
+#define UTMI_FORCE_SLEEP BIT(0)
+#define UTMI_FORCE_SUSPEND BIT(1)
+#define UTMI_DM_PULLDOWN BIT(2)
+#define UTMI_DP_PULLDOWN BIT(3)
+#define UTMI_FORCE_BVALID BIT(4)
+#define UTMI_FORCE_VBUSVALID BIT(5)
+
+#define EXYNOS850_DRD_HSP 0x54
+#define HSP_COMMONONN BIT(8)
+#define HSP_EN_UTMISUSPEND BIT(9)
+#define HSP_VBUSVLDEXT BIT(12)
+#define HSP_VBUSVLDEXTSEL BIT(13)
+#define HSP_FSV_OUT_EN BIT(24)
+
+#define EXYNOS850_DRD_HSP_TEST 0x5c
+#define HSP_TEST_SIDDQ BIT(24)
+
#define KHZ 1000
#define MHZ (KHZ * KHZ)
@@ -167,6 +194,7 @@ struct exynos5_usbdrd_phy_config {
struct exynos5_usbdrd_phy_drvdata {
const struct exynos5_usbdrd_phy_config *phy_cfg;
+ const struct phy_ops *phy_ops;
u32 pmu_offset_usbdrd0_phy;
u32 pmu_offset_usbdrd1_phy;
bool has_common_clk_gate;
@@ -245,6 +273,9 @@ static unsigned int exynos5_rate_to_clk(unsigned long rate, u32 *reg)
case 24 * MHZ:
*reg = EXYNOS5_FSEL_24MHZ;
break;
+ case 26 * MHZ:
+ *reg = EXYNOS5_FSEL_26MHZ;
+ break;
case 50 * MHZ:
*reg = EXYNOS5_FSEL_50MHZ;
break;
@@ -713,6 +744,129 @@ static const struct phy_ops exynos5_usbdrd_phy_ops = {
.owner = THIS_MODULE,
};
+static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
+{
+ void __iomem *regs_base = phy_drd->reg_phy;
+ u32 reg;
+
+ /*
+ * Disable HWACG (hardware auto clock gating control). This will force
+ * QACTIVE signal in Q-Channel interface to HIGH level, to make sure
+ * the PHY clock is not gated by the hardware.
+ */
+ reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
+ reg |= LINKCTRL_FORCE_QACT;
+ writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
+
+ /* Start PHY Reset (POR=high) */
+ reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
+ reg |= CLKRST_PHY_SW_RST;
+ writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
+
+ /* Enable UTMI+ */
+ reg = readl(regs_base + EXYNOS850_DRD_UTMI);
+ reg &= ~(UTMI_FORCE_SUSPEND | UTMI_FORCE_SLEEP | UTMI_DP_PULLDOWN |
+ UTMI_DM_PULLDOWN);
+ writel(reg, regs_base + EXYNOS850_DRD_UTMI);
+
+ /* Set PHY clock and control HS PHY */
+ reg = readl(regs_base + EXYNOS850_DRD_HSP);
+ reg |= HSP_EN_UTMISUSPEND | HSP_COMMONONN;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP);
+
+ /* Set VBUS Valid and D+ pull-up control by VBUS pad usage */
+ reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
+ reg |= LINKCTRL_BUS_FILTER_BYPASS(0xf);
+ writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
+
+ reg = readl(regs_base + EXYNOS850_DRD_UTMI);
+ reg |= UTMI_FORCE_BVALID | UTMI_FORCE_VBUSVALID;
+ writel(reg, regs_base + EXYNOS850_DRD_UTMI);
+
+ reg = readl(regs_base + EXYNOS850_DRD_HSP);
+ reg |= HSP_VBUSVLDEXT | HSP_VBUSVLDEXTSEL;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP);
+
+ /* Power up PHY analog blocks */
+ reg = readl(regs_base + EXYNOS850_DRD_HSP_TEST);
+ reg &= ~HSP_TEST_SIDDQ;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP_TEST);
+
+ /* Finish PHY reset (POR=low) */
+ udelay(10); /* required before doing POR=low */
+ reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
+ reg &= ~(CLKRST_PHY_SW_RST | CLKRST_PORT_RST);
+ writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
+ udelay(75); /* required after POR=low for guaranteed PHY clock */
+
+ /* Disable single ended signal out */
+ reg = readl(regs_base + EXYNOS850_DRD_HSP);
+ reg &= ~HSP_FSV_OUT_EN;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP);
+}
+
+static int exynos850_usbdrd_phy_init(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ int ret;
+
+ ret = clk_prepare_enable(phy_drd->clk);
+ if (ret)
+ return ret;
+
+ /* UTMI or PIPE3 specific init */
+ inst->phy_cfg->phy_init(phy_drd);
+
+ clk_disable_unprepare(phy_drd->clk);
+
+ return 0;
+}
+
+static int exynos850_usbdrd_phy_exit(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ void __iomem *regs_base = phy_drd->reg_phy;
+ u32 reg;
+ int ret;
+
+ ret = clk_prepare_enable(phy_drd->clk);
+ if (ret)
+ return ret;
+
+ /* Set PHY clock and control HS PHY */
+ reg = readl(regs_base + EXYNOS850_DRD_UTMI);
+ reg &= ~(UTMI_DP_PULLDOWN | UTMI_DM_PULLDOWN);
+ reg |= UTMI_FORCE_SUSPEND | UTMI_FORCE_SLEEP;
+ writel(reg, regs_base + EXYNOS850_DRD_UTMI);
+
+ /* Power down PHY analog blocks */
+ reg = readl(regs_base + EXYNOS850_DRD_HSP_TEST);
+ reg |= HSP_TEST_SIDDQ;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP_TEST);
+
+ /* Link reset */
+ reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
+ reg |= CLKRST_LINK_SW_RST;
+ writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
+ udelay(10); /* required before doing POR=low */
+ reg &= ~CLKRST_LINK_SW_RST;
+ writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
+
+ clk_disable_unprepare(phy_drd->clk);
+
+ return 0;
+}
+
+static const struct phy_ops exynos850_usbdrd_phy_ops = {
+ .init = exynos850_usbdrd_phy_init,
+ .exit = exynos850_usbdrd_phy_exit,
+ .power_on = exynos5_usbdrd_phy_power_on,
+ .power_off = exynos5_usbdrd_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd)
{
unsigned long ref_rate;
@@ -779,8 +933,17 @@ static const struct exynos5_usbdrd_phy_config phy_cfg_exynos5[] = {
},
};
+static const struct exynos5_usbdrd_phy_config phy_cfg_exynos850[] = {
+ {
+ .id = EXYNOS5_DRDPHY_UTMI,
+ .phy_isol = exynos5_usbdrd_phy_isol,
+ .phy_init = exynos850_usbdrd_utmi_init,
+ },
+};
+
static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
+ .phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.pmu_offset_usbdrd1_phy = EXYNOS5420_USBDRD1_PHY_CONTROL,
.has_common_clk_gate = true,
@@ -788,12 +951,14 @@ static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = {
static const struct exynos5_usbdrd_phy_drvdata exynos5250_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
+ .phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.has_common_clk_gate = true,
};
static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
+ .phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.pmu_offset_usbdrd1_phy = EXYNOS5433_USBHOST30_PHY_CONTROL,
.has_common_clk_gate = false,
@@ -801,10 +966,18 @@ static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = {
static const struct exynos5_usbdrd_phy_drvdata exynos7_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
+ .phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.has_common_clk_gate = false,
};
+static const struct exynos5_usbdrd_phy_drvdata exynos850_usbdrd_phy = {
+ .phy_cfg = phy_cfg_exynos850,
+ .phy_ops = &exynos850_usbdrd_phy_ops,
+ .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
+ .has_common_clk_gate = true,
+};
+
static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
{
.compatible = "samsung,exynos5250-usbdrd-phy",
@@ -818,6 +991,9 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
}, {
.compatible = "samsung,exynos7-usbdrd-phy",
.data = &exynos7_usbdrd_phy
+ }, {
+ .compatible = "samsung,exynos850-usbdrd-phy",
+ .data = &exynos850_usbdrd_phy
},
{ },
};
@@ -908,8 +1084,8 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
dev_vdbg(dev, "Creating usbdrd_phy phy\n");
for (i = 0; i < EXYNOS5_DRDPHYS_NUM; i++) {
- struct phy *phy = devm_phy_create(dev, NULL,
- &exynos5_usbdrd_phy_ops);
+ struct phy *phy = devm_phy_create(dev, NULL, drv_data->phy_ops);
+
if (IS_ERR(phy)) {
dev_err(dev, "Failed to create usbdrd_phy phy\n");
return PTR_ERR(phy);
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index ec2befabeea6..68a174eca0ba 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -10,8 +10,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
index ebca296ef123..c19173492b79 100644
--- a/drivers/phy/socionext/phy-uniphier-pcie.c
+++ b/drivers/phy/socionext/phy-uniphier-pcie.c
@@ -11,7 +11,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/st/phy-spear1310-miphy.c b/drivers/phy/st/phy-spear1310-miphy.c
index 292413db7da4..35a9831b5161 100644
--- a/drivers/phy/st/phy-spear1310-miphy.c
+++ b/drivers/phy/st/phy-spear1310-miphy.c
@@ -13,8 +13,9 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
/* SPEAr1310 Registers */
diff --git a/drivers/phy/st/phy-spear1340-miphy.c b/drivers/phy/st/phy-spear1340-miphy.c
index c1d9ffa5a311..34a1cf21015f 100644
--- a/drivers/phy/st/phy-spear1340-miphy.c
+++ b/drivers/phy/st/phy-spear1340-miphy.c
@@ -13,8 +13,9 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
/* SPEAr1340 Registers */
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 0a8552628cbd..d5e7e44000b5 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -12,8 +12,9 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/units.h>
diff --git a/drivers/phy/starfive/Kconfig b/drivers/phy/starfive/Kconfig
new file mode 100644
index 000000000000..9508e2143011
--- /dev/null
+++ b/drivers/phy/starfive/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Phy drivers for StarFive platforms
+#
+
+if ARCH_STARFIVE || COMPILE_TEST
+
+config PHY_STARFIVE_JH7110_DPHY_RX
+ tristate "StarFive JH7110 D-PHY RX support"
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Choose this option if you have a StarFive D-PHY in your
+ system. If M is selected, the module will be called
+ phy-jh7110-dphy-rx.ko.
+
+config PHY_STARFIVE_JH7110_PCIE
+ tristate "Starfive JH7110 PCIE 2.0/USB 3.0 PHY support"
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Enable this to support the StarFive JH7110 PCIe 2.0 PHY,
+ which can also be used as a USB 3.0 PHY.
+ If M is selected, the module will be called
+ phy-jh7110-pcie.ko.
+
+config PHY_STARFIVE_JH7110_USB
+ tristate "Starfive JH7110 USB 2.0 PHY support"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ help
+ Enable this to support the StarFive USB 2.0 PHY,
+ used with the Cadence USB controller.
+ If M is selected, the module will be called
+ phy-jh7110-usb.ko.
+
+endif # ARCH_STARFIVE || COMPILE_TEST
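
As a usage sketch only (not part of this patch), the three new symbols above can be enabled as modules with a config fragment such as:

  CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m
  CONFIG_PHY_STARFIVE_JH7110_PCIE=m
  CONFIG_PHY_STARFIVE_JH7110_USB=m

which builds the phy-jh7110-dphy-rx.ko, phy-jh7110-pcie.ko and phy-jh7110-usb.ko modules named in the help texts.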
diff --git a/drivers/phy/starfive/Makefile b/drivers/phy/starfive/Makefile
new file mode 100644
index 000000000000..b391018b7c47
--- /dev/null
+++ b/drivers/phy/starfive/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_STARFIVE_JH7110_DPHY_RX) += phy-jh7110-dphy-rx.o
+obj-$(CONFIG_PHY_STARFIVE_JH7110_PCIE) += phy-jh7110-pcie.o
+obj-$(CONFIG_PHY_STARFIVE_JH7110_USB) += phy-jh7110-usb.o
diff --git a/drivers/phy/starfive/phy-jh7110-dphy-rx.c b/drivers/phy/starfive/phy-jh7110-dphy-rx.c
new file mode 100644
index 000000000000..037a9e0263cd
--- /dev/null
+++ b/drivers/phy/starfive/phy-jh7110-dphy-rx.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * StarFive JH7110 DPHY RX driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ * Author: Jack Zhu <jack.zhu@starfivetech.com>
+ * Author: Changhuang Liang <changhuang.liang@starfivetech.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#define STF_DPHY_APBCFGSAIF_SYSCFG(x) (x)
+
+#define STF_DPHY_ENABLE_CLK BIT(6)
+#define STF_DPHY_ENABLE_CLK1 BIT(7)
+#define STF_DPHY_ENABLE_LAN0 BIT(8)
+#define STF_DPHY_ENABLE_LAN1 BIT(9)
+#define STF_DPHY_ENABLE_LAN2 BIT(10)
+#define STF_DPHY_ENABLE_LAN3 BIT(11)
+#define STF_DPHY_LANE_SWAP_CLK GENMASK(22, 20)
+#define STF_DPHY_LANE_SWAP_CLK1 GENMASK(25, 23)
+#define STF_DPHY_LANE_SWAP_LAN0 GENMASK(28, 26)
+#define STF_DPHY_LANE_SWAP_LAN1 GENMASK(31, 29)
+
+#define STF_DPHY_LANE_SWAP_LAN2 GENMASK(2, 0)
+#define STF_DPHY_LANE_SWAP_LAN3 GENMASK(5, 3)
+#define STF_DPHY_PLL_CLK_SEL GENMASK(21, 12)
+#define STF_DPHY_PRECOUNTER_IN_CLK GENMASK(29, 22)
+
+#define STF_DPHY_PRECOUNTER_IN_CLK1 GENMASK(7, 0)
+#define STF_DPHY_PRECOUNTER_IN_LAN0 GENMASK(15, 8)
+#define STF_DPHY_PRECOUNTER_IN_LAN1 GENMASK(23, 16)
+#define STF_DPHY_PRECOUNTER_IN_LAN2 GENMASK(31, 24)
+
+#define STF_DPHY_PRECOUNTER_IN_LAN3 GENMASK(7, 0)
+#define STF_DPHY_RX_1C2C_SEL BIT(8)
+
+#define STF_MAP_LANES_NUM 6
+
+struct regval {
+ u32 addr;
+ u32 val;
+};
+
+struct stf_dphy_info {
+ /**
+ * @maps:
+ *
+ * Mapping table between physical lanes and logical lanes.
+ *
+ * The default order is:
+ * [clk lane 0, data lane 0, data lane 1, data lane 2, data lane 3, clk lane 1]
+ */
+ u8 maps[STF_MAP_LANES_NUM];
+};
+
+struct stf_dphy {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *cfg_clk;
+ struct clk *ref_clk;
+ struct clk *tx_clk;
+ struct reset_control *rstc;
+ struct regulator *mipi_0p9;
+ struct phy *phy;
+ const struct stf_dphy_info *info;
+};
+
+static int stf_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+ const struct stf_dphy_info *info = dphy->info;
+
+ writel(FIELD_PREP(STF_DPHY_ENABLE_CLK, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_CLK1, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_LAN0, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_LAN1, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_LAN2, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_LAN3, 1) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_CLK, info->maps[0]) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_CLK1, info->maps[5]) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_LAN0, info->maps[1]) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_LAN1, info->maps[2]),
+ dphy->regs + STF_DPHY_APBCFGSAIF_SYSCFG(188));
+
+ writel(FIELD_PREP(STF_DPHY_LANE_SWAP_LAN2, info->maps[3]) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_LAN3, info->maps[4]) |
+ FIELD_PREP(STF_DPHY_PRECOUNTER_IN_CLK, 8),
+ dphy->regs + STF_DPHY_APBCFGSAIF_SYSCFG(192));
+
+ writel(FIELD_PREP(STF_DPHY_PRECOUNTER_IN_CLK1, 8) |
+ FIELD_PREP(STF_DPHY_PRECOUNTER_IN_LAN0, 7) |
+ FIELD_PREP(STF_DPHY_PRECOUNTER_IN_LAN1, 7) |
+ FIELD_PREP(STF_DPHY_PRECOUNTER_IN_LAN2, 7),
+ dphy->regs + STF_DPHY_APBCFGSAIF_SYSCFG(196));
+
+ writel(FIELD_PREP(STF_DPHY_PRECOUNTER_IN_LAN3, 7),
+ dphy->regs + STF_DPHY_APBCFGSAIF_SYSCFG(200));
+
+ return 0;
+}
+
+static int stf_dphy_power_on(struct phy *phy)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dphy->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_enable(dphy->mipi_0p9);
+ if (ret) {
+ pm_runtime_put(dphy->dev);
+ return ret;
+ }
+
+ clk_set_rate(dphy->cfg_clk, 99000000);
+ clk_set_rate(dphy->ref_clk, 49500000);
+ clk_set_rate(dphy->tx_clk, 19800000);
+ reset_control_deassert(dphy->rstc);
+
+ return 0;
+}
+
+static int stf_dphy_power_off(struct phy *phy)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+
+ reset_control_assert(dphy->rstc);
+
+ regulator_disable(dphy->mipi_0p9);
+
+ pm_runtime_put_sync(dphy->dev);
+
+ return 0;
+}
+
+static const struct phy_ops stf_dphy_ops = {
+ .configure = stf_dphy_configure,
+ .power_on = stf_dphy_power_on,
+ .power_off = stf_dphy_power_off,
+};
+
+static int stf_dphy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct stf_dphy *dphy;
+
+ dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
+ if (!dphy)
+ return -ENOMEM;
+
+ dphy->info = of_device_get_match_data(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, dphy);
+ dphy->dev = &pdev->dev;
+
+ dphy->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dphy->regs))
+ return PTR_ERR(dphy->regs);
+
+ dphy->cfg_clk = devm_clk_get(&pdev->dev, "cfg");
+ if (IS_ERR(dphy->cfg_clk))
+ return PTR_ERR(dphy->cfg_clk);
+
+ dphy->ref_clk = devm_clk_get(&pdev->dev, "ref");
+ if (IS_ERR(dphy->ref_clk))
+ return PTR_ERR(dphy->ref_clk);
+
+ dphy->tx_clk = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(dphy->tx_clk))
+ return PTR_ERR(dphy->tx_clk);
+
+ dphy->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(dphy->rstc))
+ return PTR_ERR(dphy->rstc);
+
+ dphy->mipi_0p9 = devm_regulator_get(&pdev->dev, "mipi_0p9");
+ if (IS_ERR(dphy->mipi_0p9))
+ return PTR_ERR(dphy->mipi_0p9);
+
+ dphy->phy = devm_phy_create(&pdev->dev, NULL, &stf_dphy_ops);
+ if (IS_ERR(dphy->phy)) {
+ dev_err(&pdev->dev, "Failed to create PHY\n");
+ return PTR_ERR(dphy->phy);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ phy_set_drvdata(dphy->phy, dphy);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct stf_dphy_info starfive_dphy_info = {
+ .maps = {4, 0, 1, 2, 3, 5},
+};
+
+static const struct of_device_id stf_dphy_dt_ids[] = {
+ {
+ .compatible = "starfive,jh7110-dphy-rx",
+ .data = &starfive_dphy_info,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stf_dphy_dt_ids);
+
+static struct platform_driver stf_dphy_driver = {
+ .probe = stf_dphy_probe,
+ .driver = {
+ .name = "starfive-dphy-rx",
+ .of_match_table = stf_dphy_dt_ids,
+ },
+};
+module_platform_driver(stf_dphy_driver);
+
+MODULE_AUTHOR("Jack Zhu <jack.zhu@starfivetech.com>");
+MODULE_AUTHOR("Changhuang Liang <changhuang.liang@starfivetech.com>");
+MODULE_DESCRIPTION("StarFive JH7110 DPHY RX driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/starfive/phy-jh7110-pcie.c b/drivers/phy/starfive/phy-jh7110-pcie.c
new file mode 100644
index 000000000000..734c8e007727
--- /dev/null
+++ b/drivers/phy/starfive/phy-jh7110-pcie.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * StarFive JH7110 PCIe 2.0 PHY driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ * Author: Minda Chen <minda.chen@starfivetech.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define PCIE_KVCO_LEVEL_OFF 0x28
+#define PCIE_USB3_PHY_PLL_CTL_OFF 0x7c
+#define PCIE_KVCO_TUNE_SIGNAL_OFF 0x80
+#define PCIE_USB3_PHY_ENABLE BIT(4)
+#define PHY_KVCO_FINE_TUNE_LEVEL 0x91
+#define PHY_KVCO_FINE_TUNE_SIGNALS 0xc
+
+#define USB_PDRSTN_SPLIT BIT(17)
+
+#define PCIE_PHY_MODE BIT(20)
+#define PCIE_PHY_MODE_MASK GENMASK(21, 20)
+#define PCIE_USB3_BUS_WIDTH_MASK GENMASK(3, 2)
+#define PCIE_USB3_BUS_WIDTH BIT(3)
+#define PCIE_USB3_RATE_MASK GENMASK(6, 5)
+#define PCIE_USB3_RX_STANDBY_MASK BIT(7)
+#define PCIE_USB3_PHY_ENABLE BIT(4)
+
+struct jh7110_pcie_phy {
+ struct phy *phy;
+ struct regmap *stg_syscon;
+ struct regmap *sys_syscon;
+ void __iomem *regs;
+ u32 sys_phy_connect;
+ u32 stg_pcie_mode;
+ u32 stg_pcie_usb;
+ enum phy_mode mode;
+};
+
+static int phy_usb3_mode_set(struct jh7110_pcie_phy *data)
+{
+ if (!data->stg_syscon || !data->sys_syscon) {
+ dev_err(&data->phy->dev, "doesn't support usb3 mode\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_mode,
+ PCIE_PHY_MODE_MASK, PCIE_PHY_MODE);
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_usb,
+ PCIE_USB3_BUS_WIDTH_MASK, 0);
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_usb,
+ PCIE_USB3_PHY_ENABLE, PCIE_USB3_PHY_ENABLE);
+
+ /* Connect usb 3.0 phy mode */
+ regmap_update_bits(data->sys_syscon, data->sys_phy_connect,
+ USB_PDRSTN_SPLIT, 0);
+
+ /* Configure spread-spectrum mode: down-spread-spectrum */
+ writel(PCIE_USB3_PHY_ENABLE, data->regs + PCIE_USB3_PHY_PLL_CTL_OFF);
+
+ return 0;
+}
+
+static void phy_pcie_mode_set(struct jh7110_pcie_phy *data)
+{
+ u32 val;
+
+ /* default is PCIe mode */
+ if (!data->stg_syscon || !data->sys_syscon)
+ return;
+
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_mode,
+ PCIE_PHY_MODE_MASK, 0);
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_usb,
+ PCIE_USB3_BUS_WIDTH_MASK,
+ PCIE_USB3_BUS_WIDTH);
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_usb,
+ PCIE_USB3_PHY_ENABLE, 0);
+
+ regmap_update_bits(data->sys_syscon, data->sys_phy_connect,
+ USB_PDRSTN_SPLIT, 0);
+
+ val = readl(data->regs + PCIE_USB3_PHY_PLL_CTL_OFF);
+ val &= ~PCIE_USB3_PHY_ENABLE;
+ writel(val, data->regs + PCIE_USB3_PHY_PLL_CTL_OFF);
+}
+
+static void phy_kvco_gain_set(struct jh7110_pcie_phy *phy)
+{
+ /* PCIe Multi-PHY PLL KVCO Gain fine tune settings: */
+ writel(PHY_KVCO_FINE_TUNE_LEVEL, phy->regs + PCIE_KVCO_LEVEL_OFF);
+ writel(PHY_KVCO_FINE_TUNE_SIGNALS, phy->regs + PCIE_KVCO_TUNE_SIGNAL_OFF);
+}
+
+static int jh7110_pcie_phy_set_mode(struct phy *_phy,
+ enum phy_mode mode, int submode)
+{
+ struct jh7110_pcie_phy *phy = phy_get_drvdata(_phy);
+ int ret;
+
+ if (mode == phy->mode)
+ return 0;
+
+ switch (mode) {
+ case PHY_MODE_USB_HOST:
+ case PHY_MODE_USB_DEVICE:
+ case PHY_MODE_USB_OTG:
+ ret = phy_usb3_mode_set(phy);
+ if (ret)
+ return ret;
+ break;
+ case PHY_MODE_PCIE:
+ phy_pcie_mode_set(phy);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(&_phy->dev, "Changing phy mode to %d\n", mode);
+ phy->mode = mode;
+
+ return 0;
+}
+
+static const struct phy_ops jh7110_pcie_phy_ops = {
+ .set_mode = jh7110_pcie_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int jh7110_pcie_phy_probe(struct platform_device *pdev)
+{
+ struct jh7110_pcie_phy *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ u32 args[2];
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy->regs))
+ return PTR_ERR(phy->regs);
+
+ phy->phy = devm_phy_create(dev, NULL, &jh7110_pcie_phy_ops);
+ if (IS_ERR(phy->phy))
+ return dev_err_probe(dev, PTR_ERR(phy->phy),
+ "Failed to map phy base\n");
+
+ phy->sys_syscon =
+ syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
+ "starfive,sys-syscon",
+ 1, args);
+
+ if (!IS_ERR_OR_NULL(phy->sys_syscon))
+ phy->sys_phy_connect = args[0];
+ else
+ phy->sys_syscon = NULL;
+
+ phy->stg_syscon =
+ syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
+ "starfive,stg-syscon",
+ 2, args);
+
+ if (!IS_ERR_OR_NULL(phy->stg_syscon)) {
+ phy->stg_pcie_mode = args[0];
+ phy->stg_pcie_usb = args[1];
+ } else {
+ phy->stg_syscon = NULL;
+ }
+
+ phy_kvco_gain_set(phy);
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id jh7110_pcie_phy_of_match[] = {
+ { .compatible = "starfive,jh7110-pcie-phy" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, jh7110_pcie_phy_of_match);
+
+static struct platform_driver jh7110_pcie_phy_driver = {
+ .probe = jh7110_pcie_phy_probe,
+ .driver = {
+ .of_match_table = jh7110_pcie_phy_of_match,
+ .name = "jh7110-pcie-phy",
+ }
+};
+module_platform_driver(jh7110_pcie_phy_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 PCIe 2.0 PHY driver");
+MODULE_AUTHOR("Minda Chen <minda.chen@starfivetech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c
new file mode 100644
index 000000000000..633912f8a05d
--- /dev/null
+++ b/drivers/phy/starfive/phy-jh7110-usb.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * StarFive JH7110 USB 2.0 PHY driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ * Author: Minda Chen <minda.chen@starfivetech.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb/of.h>
+
+#define USB_125M_CLK_RATE 125000000
+#define USB_LS_KEEPALIVE_OFF 0x4
+#define USB_LS_KEEPALIVE_ENABLE BIT(4)
+
+struct jh7110_usb2_phy {
+ struct phy *phy;
+ void __iomem *regs;
+ struct clk *usb_125m_clk;
+ struct clk *app_125m;
+ enum phy_mode mode;
+};
+
+static void usb2_set_ls_keepalive(struct jh7110_usb2_phy *phy, bool set)
+{
+ unsigned int val;
+
+ /* In host mode, enable the low-speed (LS) keep-alive signal */
+ val = readl(phy->regs + USB_LS_KEEPALIVE_OFF);
+ if (set)
+ val |= USB_LS_KEEPALIVE_ENABLE;
+ else
+ val &= ~USB_LS_KEEPALIVE_ENABLE;
+
+ writel(val, phy->regs + USB_LS_KEEPALIVE_OFF);
+}
+
+static int usb2_phy_set_mode(struct phy *_phy,
+ enum phy_mode mode, int submode)
+{
+ struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy);
+
+ switch (mode) {
+ case PHY_MODE_USB_HOST:
+ case PHY_MODE_USB_DEVICE:
+ case PHY_MODE_USB_OTG:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mode != phy->mode) {
+ dev_dbg(&_phy->dev, "Changing phy to %d\n", mode);
+ phy->mode = mode;
+ usb2_set_ls_keepalive(phy, (mode != PHY_MODE_USB_DEVICE));
+ }
+
+ return 0;
+}
+
+static int jh7110_usb2_phy_init(struct phy *_phy)
+{
+ struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy);
+ int ret;
+
+ ret = clk_set_rate(phy->usb_125m_clk, USB_125M_CLK_RATE);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy->app_125m);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int jh7110_usb2_phy_exit(struct phy *_phy)
+{
+ struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy);
+
+ clk_disable_unprepare(phy->app_125m);
+
+ return 0;
+}
+
+static const struct phy_ops jh7110_usb2_phy_ops = {
+ .init = jh7110_usb2_phy_init,
+ .exit = jh7110_usb2_phy_exit,
+ .set_mode = usb2_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int jh7110_usb_phy_probe(struct platform_device *pdev)
+{
+ struct jh7110_usb2_phy *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->usb_125m_clk = devm_clk_get(dev, "125m");
+ if (IS_ERR(phy->usb_125m_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->usb_125m_clk),
+ "Failed to get 125m clock\n");
+
+ phy->app_125m = devm_clk_get(dev, "app_125m");
+ if (IS_ERR(phy->app_125m))
+ return dev_err_probe(dev, PTR_ERR(phy->app_125m),
+ "Failed to get app 125m clock\n");
+
+ phy->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy->regs))
+ return dev_err_probe(dev, PTR_ERR(phy->regs),
+ "Failed to map phy base\n");
+
+ phy->phy = devm_phy_create(dev, NULL, &jh7110_usb2_phy_ops);
+ if (IS_ERR(phy->phy))
+ return dev_err_probe(dev, PTR_ERR(phy->phy),
+ "Failed to create phy\n");
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id jh7110_usb_phy_of_match[] = {
+ { .compatible = "starfive,jh7110-usb-phy" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, jh7110_usb_phy_of_match);
+
+static struct platform_driver jh7110_usb_phy_driver = {
+ .probe = jh7110_usb_phy_probe,
+ .driver = {
+ .of_match_table = jh7110_usb_phy_of_match,
+ .name = "jh7110-usb-phy",
+ }
+};
+module_platform_driver(jh7110_usb_phy_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 USB 2.0 PHY driver");
+MODULE_AUTHOR("Minda Chen <minda.chen@starfivetech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
index 56de41091d63..0efe74ac9c6a 100644
--- a/drivers/phy/sunplus/phy-sunplus-usb2.c
+++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/phy/tegra/phy-tegra194-p2u.c b/drivers/phy/tegra/phy-tegra194-p2u.c
index 633e6b747275..f49b417c9eb6 100644
--- a/drivers/phy/tegra/phy-tegra194-p2u.c
+++ b/drivers/phy/tegra/phy-tegra194-p2u.c
@@ -11,8 +11,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#define P2U_CONTROL_CMN 0x74
#define P2U_CONTROL_CMN_ENABLE_L2_EXIT_RATE_CHANGE BIT(13)
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index a296b87dced1..142ebe0247cc 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -8,7 +8,7 @@
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 6286cf25a426..555b323f45da 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -465,9 +465,12 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
- ret = PTR_ERR(priv->regmap);
- dev_err(dev, "Failed to get syscon %d\n", ret);
- return ret;
+ priv->regmap = device_node_to_regmap(node);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(dev, "Failed to get syscon %d\n", ret);
+ return ret;
+ }
}
ret = phy_gmii_sel_init_ports(priv);
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index 669c13d6e402..b4881cb34475 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -14,6 +14,7 @@
#include <linux/gpio/consumer.h>
#include <linux/phy/ulpi_phy.h>
#include <linux/power_supply.h>
+#include <linux/property.h>
#include <linux/workqueue.h>
#define TUSB1211_POWER_CONTROL 0x3d
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index da50732625d1..6b265992d988 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index 8833680923a1..2559c6594cea 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <dt-bindings/phy/phy.h>
@@ -571,6 +572,10 @@ static int xpsgtr_phy_init(struct phy *phy)
mutex_lock(&gtr_dev->gtr_mutex);
+ /* Configure and enable the lane's reference clock when the peripheral calls phy_init() */
+ if (clk_prepare_enable(gtr_dev->clk[gtr_phy->lane]))
+ goto out;
+
/* Skip initialization if not required. */
if (!xpsgtr_phy_init_required(gtr_phy))
goto out;
@@ -615,9 +620,13 @@ out:
static int xpsgtr_phy_exit(struct phy *phy)
{
struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
gtr_phy->skip_phy_init = false;
+ /* Disable only the reference clock that was enabled for this lane */
+ clk_disable_unprepare(gtr_dev->clk[gtr_phy->lane]);
+
return 0;
}
@@ -820,34 +829,23 @@ static struct phy *xpsgtr_xlate(struct device *dev,
* Power Management
*/
-static int __maybe_unused xpsgtr_suspend(struct device *dev)
+static int xpsgtr_runtime_suspend(struct device *dev)
{
struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
- unsigned int i;
/* Save the snapshot ICM_CFG registers. */
gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
- for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
- clk_disable_unprepare(gtr_dev->clk[i]);
-
return 0;
}
-static int __maybe_unused xpsgtr_resume(struct device *dev)
+static int xpsgtr_runtime_resume(struct device *dev)
{
struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
unsigned int icm_cfg0, icm_cfg1;
unsigned int i;
bool skip_phy_init;
- int err;
-
- for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++) {
- err = clk_prepare_enable(gtr_dev->clk[i]);
- if (err)
- goto err_clk_put;
- }
icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
@@ -868,18 +866,10 @@ static int __maybe_unused xpsgtr_resume(struct device *dev)
gtr_dev->phys[i].skip_phy_init = skip_phy_init;
return 0;
-
-err_clk_put:
- while (i--)
- clk_disable_unprepare(gtr_dev->clk[i]);
-
- return err;
}
-static const struct dev_pm_ops xpsgtr_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
-};
-
+static DEFINE_RUNTIME_DEV_PM_OPS(xpsgtr_pm_ops, xpsgtr_runtime_suspend,
+ xpsgtr_runtime_resume, NULL);
/*
* Probe & Platform Driver
*/
@@ -887,7 +877,6 @@ static const struct dev_pm_ops xpsgtr_pm_ops = {
static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
{
unsigned int refclk;
- int ret;
for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
unsigned long rate;
@@ -898,19 +887,14 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
snprintf(name, sizeof(name), "ref%u", refclk);
clk = devm_clk_get_optional(gtr_dev->dev, name);
if (IS_ERR(clk)) {
- ret = dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
- "Failed to get reference clock %u\n",
- refclk);
- goto err_clk_put;
+ return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
+ "Failed to get ref clock %u\n",
+ refclk);
}
if (!clk)
continue;
- ret = clk_prepare_enable(clk);
- if (ret)
- goto err_clk_put;
-
gtr_dev->clk[refclk] = clk;
/*
@@ -920,7 +904,10 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
rate = clk_get_rate(clk);
for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
- if (rate == ssc_lookup[i].refclk_rate) {
+ /* Allow an error of 100 ppm */
+ unsigned long error = ssc_lookup[i].refclk_rate / 10000;
+
+ if (abs(rate - ssc_lookup[i].refclk_rate) < error) {
gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
break;
}
@@ -930,18 +917,11 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
dev_err(gtr_dev->dev,
"Invalid rate %lu for reference clock %u\n",
rate, refclk);
- ret = -EINVAL;
- goto err_clk_put;
+ return -EINVAL;
}
}
return 0;
-
-err_clk_put:
- while (refclk--)
- clk_disable_unprepare(gtr_dev->clk[refclk]);
-
- return ret;
}
static int xpsgtr_probe(struct platform_device *pdev)
@@ -950,7 +930,6 @@ static int xpsgtr_probe(struct platform_device *pdev)
struct xpsgtr_dev *gtr_dev;
struct phy_provider *provider;
unsigned int port;
- unsigned int i;
int ret;
gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
@@ -990,8 +969,7 @@ static int xpsgtr_probe(struct platform_device *pdev)
phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
if (IS_ERR(phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
- ret = PTR_ERR(phy);
- goto err_clk_put;
+ return PTR_ERR(phy);
}
gtr_phy->phy = phy;
@@ -1002,16 +980,30 @@ static int xpsgtr_probe(struct platform_device *pdev)
provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
if (IS_ERR(provider)) {
dev_err(&pdev->dev, "registering provider failed\n");
- ret = PTR_ERR(provider);
- goto err_clk_put;
+ return PTR_ERR(provider);
}
+
+ pm_runtime_set_active(gtr_dev->dev);
+ pm_runtime_enable(gtr_dev->dev);
+
+ ret = pm_runtime_resume_and_get(gtr_dev->dev);
+ if (ret < 0) {
+ pm_runtime_disable(gtr_dev->dev);
+ return ret;
+ }
+
return 0;
+}
-err_clk_put:
- for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
- clk_disable_unprepare(gtr_dev->clk[i]);
+static int xpsgtr_remove(struct platform_device *pdev)
+{
+ struct xpsgtr_dev *gtr_dev = platform_get_drvdata(pdev);
- return ret;
+ pm_runtime_disable(gtr_dev->dev);
+ pm_runtime_put_noidle(gtr_dev->dev);
+ pm_runtime_set_suspended(gtr_dev->dev);
+
+ return 0;
}
static const struct of_device_id xpsgtr_of_match[] = {
@@ -1023,10 +1015,11 @@ MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
static struct platform_driver xpsgtr_driver = {
.probe = xpsgtr_probe,
+ .remove = xpsgtr_remove,
.driver = {
.name = "xilinx-psgtr",
.of_match_table = xpsgtr_of_match,
- .pm = &xpsgtr_pm_ops,
+ .pm = pm_ptr(&xpsgtr_pm_ops),
},
};
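
The phy-zynqmp.c changes above replace open-coded system-sleep clock handling with runtime PM. As a minimal, self-contained sketch of that pattern (all names below are invented for illustration and are not part of the patch):

  #include <linux/module.h>
  #include <linux/platform_device.h>
  #include <linux/pm_runtime.h>

  static int foo_runtime_suspend(struct device *dev)
  {
          /* Gate clocks and save any volatile state here. */
          return 0;
  }

  static int foo_runtime_resume(struct device *dev)
  {
          /* Ungate clocks and restore state here. */
          return 0;
  }

  /* System sleep falls back to pm_runtime_force_{suspend,resume}(). */
  static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
                                   foo_runtime_resume, NULL);

  static int foo_probe(struct platform_device *pdev)
  {
          int ret;

          pm_runtime_set_active(&pdev->dev);
          pm_runtime_enable(&pdev->dev);

          ret = pm_runtime_resume_and_get(&pdev->dev);
          if (ret < 0) {
                  pm_runtime_disable(&pdev->dev);
                  return ret;
          }

          return 0;
  }

  static struct platform_driver foo_driver = {
          .probe = foo_probe,
          .driver = {
                  .name = "foo-phy",
                  /* pm_ptr() drops the ops entirely when CONFIG_PM is off. */
                  .pm = pm_ptr(&foo_pm_ops),
          },
  };
  module_platform_driver(foo_driver);

  MODULE_DESCRIPTION("Runtime PM pattern sketch");
  MODULE_LICENSE("GPL");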
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index 382793e73a60..f7dfa0e785fd 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -60,6 +60,7 @@ config MLXBF_BOOTCTL
tristate "Mellanox BlueField Firmware Boot Control driver"
depends on ARM64
depends on ACPI
+ depends on NET
help
The Mellanox BlueField firmware implements functionality to
request swapping the primary and alternate eMMC boot partition,
@@ -80,8 +81,8 @@ config MLXBF_PMC
config NVSW_SN2201
tristate "Nvidia SN2201 platform driver support"
- depends on HWMON
- depends on I2C
+ depends on HWMON && I2C
+ depends on ACPI || COMPILE_TEST
select REGMAP_I2C
help
This driver provides support for the Nvidia SN2201 platform.
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index be967d797c28..2d4bbe99959e 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -191,6 +191,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
@@ -214,6 +215,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
+ { 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
@@ -246,6 +248,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
+ { 0x0, "DISABLE" },
{ 0x100, "ECC_SINGLE_ERROR_CNT" },
{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
{ 0x114, "SERR_INJ" },
@@ -258,6 +261,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
+ { 0x0, "DISABLE" },
{ 0xc0, "RXREQ_MSS" },
{ 0xc1, "RXDAT_MSS" },
{ 0xc2, "TXRSP_MSS" },
@@ -265,6 +269,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
+ { 0x0, "DISABLE" },
{ 0x45, "HNF_REQUESTS" },
{ 0x46, "HNF_REJECTS" },
{ 0x47, "ALL_BUSY" },
@@ -323,6 +328,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
+ { 0x0, "DISABLE" },
{ 0x12, "CDN_REQ" },
{ 0x13, "DDN_REQ" },
{ 0x14, "NDN_REQ" },
@@ -892,7 +898,7 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
uint64_t *result)
{
uint32_t perfcfg_offset, perfval_offset;
- uint64_t perfmon_cfg, perfevt, perfctl;
+ uint64_t perfmon_cfg, perfevt;
if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
@@ -906,25 +912,6 @@ static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
/* Set counter in "read" mode */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
- MLXBF_PMC_PERFCTL);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
- perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
-
- if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
- MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
- return -EFAULT;
-
- /* Check if the counter is enabled */
-
- if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
- MLXBF_PMC_READ_REG_64, &perfctl))
- return -EFAULT;
-
- if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
- return -EINVAL;
-
- /* Set counter in "read" mode */
- perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFEVT);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
@@ -1008,7 +995,7 @@ static ssize_t mlxbf_pmc_counter_show(struct device *dev,
} else
return -EINVAL;
- return sprintf(buf, "0x%llx\n", value);
+ return sysfs_emit(buf, "0x%llx\n", value);
}
/* Store function for "counter" sysfs files */
@@ -1078,13 +1065,13 @@ static ssize_t mlxbf_pmc_event_show(struct device *dev,
err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
if (err)
- return sprintf(buf, "No event being monitored\n");
+ return sysfs_emit(buf, "No event being monitored\n");
evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
if (!evt_name)
return -EINVAL;
- return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
+ return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
}
/* Store function for "event" sysfs files */
@@ -1139,9 +1126,9 @@ static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
return -EINVAL;
for (i = 0, buf[0] = '\0'; i < size; ++i) {
- len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
- events[i].evt_name);
- if (len > PAGE_SIZE)
+ len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
+ events[i].evt_num, events[i].evt_name);
+ if (len >= PAGE_SIZE)
break;
strcat(buf, e_info);
ret = len;
@@ -1168,7 +1155,7 @@ static ssize_t mlxbf_pmc_enable_show(struct device *dev,
value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
/* Store function for "enable" sysfs files - only for l3cache */
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index b600b77d91ef..fd38d8c8371e 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -53,12 +53,13 @@
struct mlxbf_tmfifo;
/**
- * mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
+ * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
* @va: virtual address of the ring
* @dma: dma address of the ring
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
+ * @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
@@ -75,6 +76,7 @@ struct mlxbf_tmfifo_vring {
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
+ struct vring_desc drop_desc;
int cur_len;
int rem_len;
u32 pkt_len;
@@ -86,6 +88,14 @@ struct mlxbf_tmfifo_vring {
struct mlxbf_tmfifo *fifo;
};
+/* Check whether vring is in drop mode. */
+#define IS_VRING_DROP(_r) ({ \
+ typeof(_r) (r) = (_r); \
+ (r->desc_head == &r->drop_desc ? true : false); })
+
+/* A stub length used to drop a maximum-length packet. */
+#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
+
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
@@ -103,12 +113,13 @@ enum {
};
/**
- * mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
+ * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
* @vdev: virtio device, in which the vdev.id.device field has the
* VIRTIO_ID_xxx id to distinguish the virtual device.
* @status: status of the device
* @features: supported features of the device
* @vrings: array of tmfifo vrings of this device
+ * @config: non-anonymous union for cons and net
* @config.cons: virtual console config -
* select if vdev.id.device is VIRTIO_ID_CONSOLE
* @config.net: virtual network config -
@@ -128,7 +139,7 @@ struct mlxbf_tmfifo_vdev {
};
/**
- * mlxbf_tmfifo_irq_info - Structure of the interrupt information
+ * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information
* @fifo: pointer to the tmfifo structure
* @irq: interrupt number
* @index: index into the interrupt array
@@ -140,7 +151,7 @@ struct mlxbf_tmfifo_irq_info {
};
/**
- * mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx)
+ * struct mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx)
* @ctl: control register offset (TMFIFO_RX_CTL / TMFIFO_TX_CTL)
* @sts: status register offset (TMFIFO_RX_STS / TMFIFO_TX_STS)
* @data: data register offset (TMFIFO_RX_DATA / TMFIFO_TX_DATA)
@@ -152,7 +163,7 @@ struct mlxbf_tmfifo_io {
};
/**
- * mlxbf_tmfifo - Structure of the TmFifo
+ * struct mlxbf_tmfifo - Structure of the TmFifo
* @vdev: array of the virtual devices running over the TmFifo
* @lock: lock to protect the TmFifo access
* @res0: mapped resource block 0
@@ -188,7 +199,7 @@ struct mlxbf_tmfifo {
};
/**
- * mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
+ * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
* @type: message type
* @len: payload length in network byte order. Messages sent into the FIFO
* will be read by the other side as data stream in the same byte order.
@@ -198,6 +209,7 @@ struct mlxbf_tmfifo {
struct mlxbf_tmfifo_msg_hdr {
u8 type;
__be16 len;
+ /* private: */
u8 unused[5];
} __packed __aligned(sizeof(u64));
@@ -214,7 +226,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
/* Maximum L2 header length. */
-#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
+#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)
/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
@@ -262,6 +274,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
vring->align = SMP_CACHE_BYTES;
vring->index = i;
vring->vdev_id = tm_vdev->vdev.id.device;
+ vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
dev = &tm_vdev->vdev.dev;
size = vring_size(vring->num, vring->align);
@@ -367,7 +380,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
return len;
}
-static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
+static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc_head;
u32 len = 0;
@@ -596,19 +609,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data, sizeof(u64));
- else
- memcpy(&data, addr + vring->cur_len, sizeof(u64));
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ sizeof(u64));
+ else
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
+ }
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
- if (is_rx)
- memcpy(addr + vring->cur_len, &data,
- len - vring->cur_len);
- else
- memcpy(&data, addr + vring->cur_len,
- len - vring->cur_len);
+ if (!IS_VRING_DROP(vring)) {
+ if (is_rx)
+ memcpy(addr + vring->cur_len, &data,
+ len - vring->cur_len);
+ else
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
+ }
vring->cur_len = len;
}
@@ -625,13 +644,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
* flag is set.
*/
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
- struct vring_desc *desc,
+ struct vring_desc **desc,
bool is_rx, bool *vring_change)
{
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_net_config *config;
struct mlxbf_tmfifo_msg_hdr hdr;
int vdev_id, hdr_len;
+ bool drop_rx = false;
/* Read/Write packet header. */
if (is_rx) {
@@ -651,8 +671,8 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
if (ntohs(hdr.len) >
__virtio16_to_cpu(virtio_legacy_is_little_endian(),
config->mtu) +
- MLXBF_TMFIFO_NET_L2_OVERHEAD)
- return;
+ MLXBF_TMFIFO_NET_L2_OVERHEAD)
+ drop_rx = true;
} else {
vdev_id = VIRTIO_ID_CONSOLE;
hdr_len = 0;
@@ -667,16 +687,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
if (!tm_dev2)
return;
- vring->desc = desc;
+ vring->desc = *desc;
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
*vring_change = true;
}
+
+ if (drop_rx && !IS_VRING_DROP(vring)) {
+ if (vring->desc_head)
+ mlxbf_tmfifo_release_pkt(vring);
+ *desc = &vring->drop_desc;
+ vring->desc_head = *desc;
+ vring->desc = *desc;
+ }
+
vring->pkt_len = ntohs(hdr.len) + hdr_len;
} else {
/* Network virtio has an extra header. */
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
sizeof(struct virtio_net_hdr) : 0;
- vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
+ vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
hdr.len = htons(vring->pkt_len - hdr_len);
@@ -709,15 +738,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
/* Get the descriptor of the next packet. */
if (!vring->desc) {
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
- if (!desc)
- return false;
+ if (!desc) {
+ /* Drop the next Rx packet to avoid getting stuck. */
+ if (is_rx) {
+ desc = &vring->drop_desc;
+ vring->desc_head = desc;
+ vring->desc = desc;
+ } else {
+ return false;
+ }
+ }
} else {
desc = vring->desc;
}
/* Beginning of a packet. Start to Rx/Tx packet header. */
if (vring->pkt_len == 0) {
- mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
+ mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
(*avail)--;
/* Return if new packet is for another ring. */
@@ -743,17 +780,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
vring->rem_len -= len;
/* Get the next desc on the chain. */
- if (vring->rem_len > 0 &&
+ if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
goto mlxbf_tmfifo_desc_done;
}
- /* Done and release the pending packet. */
- mlxbf_tmfifo_release_pending_pkt(vring);
+ /* Done and release the packet. */
desc = NULL;
fifo->vring[is_rx] = NULL;
+ if (!IS_VRING_DROP(vring)) {
+ mlxbf_tmfifo_release_pkt(vring);
+ } else {
+ vring->pkt_len = 0;
+ vring->desc_head = NULL;
+ vring->desc = NULL;
+ return false;
+ }
/*
* Make sure the load/store are in order before
@@ -933,7 +977,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
/* Release the pending packet. */
if (vring->desc)
- mlxbf_tmfifo_release_pending_pkt(vring);
+ mlxbf_tmfifo_release_pkt(vring);
vq = vring->vq;
if (vq) {
vring->vq = NULL;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index fdf7da06af30..d85d895fee89 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -480,6 +480,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUS ROG FLOW X16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUS VivoBook E410MA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 8c4f9e12f018..5798b49ddaba 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -659,7 +659,7 @@ static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type,
const char *guid, int min_elements,
int instance_id)
{
- struct kobject *attr_name_kobj;
+ struct kobject *attr_name_kobj, *duplicate;
union acpi_object *elements;
struct kset *temp_kset;
@@ -704,8 +704,11 @@ static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type,
}
/* All duplicate attributes found are ignored */
- if (kset_find_obj(temp_kset, str_value)) {
+ duplicate = kset_find_obj(temp_kset, str_value);
+ if (duplicate) {
pr_debug("Duplicate attribute name found - %s\n", str_value);
+ /* kset_find_obj() returns a reference */
+ kobject_put(duplicate);
goto pack_attr_exit;
}
@@ -768,7 +771,7 @@ static int hp_init_bios_buffer_attribute(enum hp_wmi_data_type attr_type,
const char *guid, int min_elements,
int instance_id)
{
- struct kobject *attr_name_kobj;
+ struct kobject *attr_name_kobj, *duplicate;
struct kset *temp_kset;
char str[MAX_BUFF_SIZE];
@@ -794,8 +797,11 @@ static int hp_init_bios_buffer_attribute(enum hp_wmi_data_type attr_type,
temp_kset = bioscfg_drv.main_dir_kset;
/* All duplicate attributes found are ignored */
- if (kset_find_obj(temp_kset, str)) {
+ duplicate = kset_find_obj(temp_kset, str);
+ if (duplicate) {
pr_debug("Duplicate attribute name found - %s\n", str);
+ /* kset_find_obj() returns a reference */
+ kobject_put(duplicate);
goto buff_attr_exit;
}
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index e76e5458db35..8ebb7be52ee7 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -1548,7 +1548,13 @@ static const struct dev_pm_ops hp_wmi_pm_ops = {
.restore = hp_wmi_resume_handler,
};
-static struct platform_driver hp_wmi_driver = {
+/*
+ * hp_wmi_bios_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost from
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver hp_wmi_driver __refdata = {
.driver = {
.name = "hp-wmi",
.pm = &hp_wmi_pm_ops,
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
index 1061eb7ec399..43c864add778 100644
--- a/drivers/platform/x86/intel/ifs/runtest.c
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -331,14 +331,15 @@ int do_core_test(int cpu, struct device *dev)
switch (test->test_num) {
case IFS_TYPE_SAF:
if (!ifsd->loaded)
- return -EPERM;
- ifs_test_core(cpu, dev);
+ ret = -EPERM;
+ else
+ ifs_test_core(cpu, dev);
break;
case IFS_TYPE_ARRAY_BIST:
ifs_array_test_core(cpu, dev);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
out:
cpus_read_unlock();
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 6851d10d6582..a68df4133403 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -231,19 +232,15 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
/* Wait until the SCU status is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
- unsigned long end = jiffies + IPC_TIMEOUT;
-
- do {
- u32 status;
-
- status = ipc_read_status(scu);
- if (!(status & IPC_STATUS_BUSY))
- return (status & IPC_STATUS_ERR) ? -EIO : 0;
+ u8 status;
+ int err;
- usleep_range(50, 100);
- } while (time_before(jiffies, end));
+ err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
+ 100, jiffies_to_usecs(IPC_TIMEOUT));
+ if (err)
+ return err;
- return -ETIMEDOUT;
+ return (status & IPC_STATUS_ERR) ? -EIO : 0;
}
/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
@@ -251,10 +248,12 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
- if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
- return -ETIMEDOUT;
+ wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
status = ipc_read_status(scu);
+ if (status & IPC_STATUS_BUSY)
+ return -ETIMEDOUT;
+
if (status & IPC_STATUS_ERR)
return -EIO;
@@ -266,6 +265,24 @@ static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}
+static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
+{
+ u8 status;
+
+ if (!scu)
+ scu = ipcdev;
+ if (!scu)
+ return ERR_PTR(-ENODEV);
+
+ status = ipc_read_status(scu);
+ if (status & IPC_STATUS_BUSY) {
+ dev_dbg(&scu->dev, "device is busy\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ return scu;
+}
+
/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
u32 count, u32 op, u32 id)
@@ -279,11 +296,10 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
memset(cbuf, 0, sizeof(cbuf));
mutex_lock(&ipclock);
- if (!scu)
- scu = ipcdev;
- if (!scu) {
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
- return -ENODEV;
+ return PTR_ERR(scu);
}
for (nc = 0; nc < count; nc++, offset += 2) {
@@ -438,13 +454,12 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
int err;
mutex_lock(&ipclock);
- if (!scu)
- scu = ipcdev;
- if (!scu) {
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
- return -ENODEV;
+ return PTR_ERR(scu);
}
- scu = ipcdev;
+
cmdval = sub << 12 | cmd;
ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
@@ -484,11 +499,10 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
return -EINVAL;
mutex_lock(&ipclock);
- if (!scu)
- scu = ipcdev;
- if (!scu) {
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
- return -ENODEV;
+ return PTR_ERR(scu);
}
memcpy(inbuf, in, inlen);
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 79346881cadb..aee869769843 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -1248,6 +1248,24 @@ static void tlmi_release_attr(void)
kset_unregister(tlmi_priv.authentication_kset);
}
+static int tlmi_validate_setting_name(struct kset *attribute_kset, char *name)
+{
+ struct kobject *duplicate;
+
+ if (!strcmp(name, "Reserved"))
+ return -EINVAL;
+
+ duplicate = kset_find_obj(attribute_kset, name);
+ if (duplicate) {
+ pr_debug("Duplicate attribute name found - %s\n", name);
+ /* kset_find_obj() returns a reference */
+ kobject_put(duplicate);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static int tlmi_sysfs_init(void)
{
int i, ret;
@@ -1276,10 +1294,8 @@ static int tlmi_sysfs_init(void)
continue;
/* check for duplicate or reserved values */
- if (kset_find_obj(tlmi_priv.attribute_kset, tlmi_priv.setting[i]->display_name) ||
- !strcmp(tlmi_priv.setting[i]->display_name, "Reserved")) {
- pr_debug("duplicate or reserved attribute name found - %s\n",
- tlmi_priv.setting[i]->display_name);
+ if (tlmi_validate_setting_name(tlmi_priv.attribute_kset,
+ tlmi_priv.setting[i]->display_name) < 0) {
kfree(tlmi_priv.setting[i]->possible_values);
kfree(tlmi_priv.setting[i]);
tlmi_priv.setting[i] = NULL;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d70c89d32534..41584427dc32 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4116,9 +4116,11 @@ static void hotkey_resume(void)
{
tpacpi_disable_brightness_delay();
+ mutex_lock(&hotkey_mutex);
if (hotkey_status_set(true) < 0 ||
hotkey_mask_set(hotkey_acpi_mask) < 0)
pr_err("error while attempting to reset the event firmware interface\n");
+ mutex_unlock(&hotkey_mutex);
tpacpi_send_radiosw_update();
tpacpi_input_send_tabletsw();
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index f9301a9382e7..0c6733772698 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -42,6 +42,21 @@ static const struct ts_dmi_data archos_101_cesium_educ_data = {
.properties = archos_101_cesium_educ_props,
};
+static const struct property_entry bush_bush_windows_tablet_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1850),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-bush-bush-windows-tablet.fw"),
+ { }
+};
+
+static const struct ts_dmi_data bush_bush_windows_tablet_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = bush_bush_windows_tablet_props,
+};
+
static const struct property_entry chuwi_hi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -756,6 +771,21 @@ static const struct ts_dmi_data pipo_w11_data = {
.properties = pipo_w11_props,
};
+static const struct property_entry positivo_c4128b_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 13),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1915),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1269),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data positivo_c4128b_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = positivo_c4128b_props,
+};
+
static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
@@ -1071,6 +1101,13 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Bush Windows tablet */
+ .driver_data = (void *)&bush_bush_windows_tablet_data,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Bush Windows tablet"),
+ },
+ },
+ {
/* Chuwi Hi8 */
.driver_data = (void *)&chuwi_hi8_data,
.matches = {
@@ -1481,6 +1518,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Positivo C4128B */
+ .driver_data = (void *)&positivo_c4128b_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C4128B-1"),
+ },
+ },
+ {
/* Point of View mobii wintab p800w (v2.0) */
.driver_data = (void *)&pov_mobii_wintab_p800w_v20_data,
.matches = {
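As a sketch of how the DMI-injected entries above are consumed, a driver bound to the matched ACPI device can read the same property names through the generic property API. The demo_* function below is illustrative only; the property names mirror the table entries.

#include <linux/device.h>
#include <linux/property.h>

static void demo_read_ts_props(struct device *dev)
{
        u32 size_x = 0, max_fingers = 0;
        bool swapped;

        device_property_read_u32(dev, "touchscreen-size-x", &size_x);
        device_property_read_u32(dev, "silead,max-fingers", &max_fingers);
        swapped = device_property_read_bool(dev, "touchscreen-swapped-x-y");

        dev_info(dev, "size-x=%u max-fingers=%u swapped=%d\n",
                 size_x, max_fingers, swapped);
}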
diff --git a/drivers/genpd/Makefile b/drivers/pmdomain/Makefile
index 666753676e5c..666753676e5c 100644
--- a/drivers/genpd/Makefile
+++ b/drivers/pmdomain/Makefile
diff --git a/drivers/genpd/actions/Makefile b/drivers/pmdomain/actions/Makefile
index 7e8aa473d12d..7e8aa473d12d 100644
--- a/drivers/genpd/actions/Makefile
+++ b/drivers/pmdomain/actions/Makefile
diff --git a/drivers/genpd/actions/owl-sps-helper.c b/drivers/pmdomain/actions/owl-sps-helper.c
index e3f36603dd53..e3f36603dd53 100644
--- a/drivers/genpd/actions/owl-sps-helper.c
+++ b/drivers/pmdomain/actions/owl-sps-helper.c
diff --git a/drivers/genpd/actions/owl-sps.c b/drivers/pmdomain/actions/owl-sps.c
index 73a9e0bb7e8e..73a9e0bb7e8e 100644
--- a/drivers/genpd/actions/owl-sps.c
+++ b/drivers/pmdomain/actions/owl-sps.c
diff --git a/drivers/genpd/amlogic/Makefile b/drivers/pmdomain/amlogic/Makefile
index 3d58abd574f9..3d58abd574f9 100644
--- a/drivers/genpd/amlogic/Makefile
+++ b/drivers/pmdomain/amlogic/Makefile
diff --git a/drivers/genpd/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
index cfb796d40d9d..cfb796d40d9d 100644
--- a/drivers/genpd/amlogic/meson-ee-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
diff --git a/drivers/genpd/amlogic/meson-gx-pwrc-vpu.c b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
index 33df520eab95..33df520eab95 100644
--- a/drivers/genpd/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
diff --git a/drivers/genpd/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
index 89c881c56cd7..89c881c56cd7 100644
--- a/drivers/genpd/amlogic/meson-secure-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
diff --git a/drivers/genpd/apple/Makefile b/drivers/pmdomain/apple/Makefile
index 53665af630be..53665af630be 100644
--- a/drivers/genpd/apple/Makefile
+++ b/drivers/pmdomain/apple/Makefile
diff --git a/drivers/genpd/apple/pmgr-pwrstate.c b/drivers/pmdomain/apple/pmgr-pwrstate.c
index d62a776c89a1..d62a776c89a1 100644
--- a/drivers/genpd/apple/pmgr-pwrstate.c
+++ b/drivers/pmdomain/apple/pmgr-pwrstate.c
diff --git a/drivers/genpd/bcm/Makefile b/drivers/pmdomain/bcm/Makefile
index 6bfbe4e4db13..6bfbe4e4db13 100644
--- a/drivers/genpd/bcm/Makefile
+++ b/drivers/pmdomain/bcm/Makefile
diff --git a/drivers/genpd/bcm/bcm-pmb.c b/drivers/pmdomain/bcm/bcm-pmb.c
index a72ba26ecf9d..a72ba26ecf9d 100644
--- a/drivers/genpd/bcm/bcm-pmb.c
+++ b/drivers/pmdomain/bcm/bcm-pmb.c
diff --git a/drivers/genpd/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c
index 1a179d4e011c..1a179d4e011c 100644
--- a/drivers/genpd/bcm/bcm2835-power.c
+++ b/drivers/pmdomain/bcm/bcm2835-power.c
diff --git a/drivers/genpd/bcm/bcm63xx-power.c b/drivers/pmdomain/bcm/bcm63xx-power.c
index 98b0c2430dbc..98b0c2430dbc 100644
--- a/drivers/genpd/bcm/bcm63xx-power.c
+++ b/drivers/pmdomain/bcm/bcm63xx-power.c
diff --git a/drivers/genpd/bcm/raspberrypi-power.c b/drivers/pmdomain/bcm/raspberrypi-power.c
index 06196ebfe03b..06196ebfe03b 100644
--- a/drivers/genpd/bcm/raspberrypi-power.c
+++ b/drivers/pmdomain/bcm/raspberrypi-power.c
diff --git a/drivers/genpd/imx/Makefile b/drivers/pmdomain/imx/Makefile
index 52d2629014a7..52d2629014a7 100644
--- a/drivers/genpd/imx/Makefile
+++ b/drivers/pmdomain/imx/Makefile
diff --git a/drivers/genpd/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
index 90a8b2c0676f..90a8b2c0676f 100644
--- a/drivers/genpd/imx/gpc.c
+++ b/drivers/pmdomain/imx/gpc.c
diff --git a/drivers/genpd/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index fbd3d92f8cd8..fbd3d92f8cd8 100644
--- a/drivers/genpd/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
diff --git a/drivers/genpd/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
index cc5ef6e2f0a8..cc5ef6e2f0a8 100644
--- a/drivers/genpd/imx/imx8m-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
diff --git a/drivers/genpd/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index c6ac32c1a8c1..c6ac32c1a8c1 100644
--- a/drivers/genpd/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
diff --git a/drivers/genpd/imx/imx93-blk-ctrl.c b/drivers/pmdomain/imx/imx93-blk-ctrl.c
index 40bd90f8b977..40bd90f8b977 100644
--- a/drivers/genpd/imx/imx93-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx93-blk-ctrl.c
diff --git a/drivers/genpd/imx/imx93-pd.c b/drivers/pmdomain/imx/imx93-pd.c
index b9e60d136875..b9e60d136875 100644
--- a/drivers/genpd/imx/imx93-pd.c
+++ b/drivers/pmdomain/imx/imx93-pd.c
diff --git a/drivers/genpd/imx/scu-pd.c b/drivers/pmdomain/imx/scu-pd.c
index 2f693b67ddb4..2f693b67ddb4 100644
--- a/drivers/genpd/imx/scu-pd.c
+++ b/drivers/pmdomain/imx/scu-pd.c
diff --git a/drivers/genpd/mediatek/Makefile b/drivers/pmdomain/mediatek/Makefile
index 8cde09e654b3..8cde09e654b3 100644
--- a/drivers/genpd/mediatek/Makefile
+++ b/drivers/pmdomain/mediatek/Makefile
diff --git a/drivers/genpd/mediatek/mt6795-pm-domains.h b/drivers/pmdomain/mediatek/mt6795-pm-domains.h
index ef07c9dfdd9b..ef07c9dfdd9b 100644
--- a/drivers/genpd/mediatek/mt6795-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt6795-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8167-pm-domains.h b/drivers/pmdomain/mediatek/mt8167-pm-domains.h
index 4d6c32759606..4d6c32759606 100644
--- a/drivers/genpd/mediatek/mt8167-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8167-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8173-pm-domains.h b/drivers/pmdomain/mediatek/mt8173-pm-domains.h
index 1a5dc63b7357..1a5dc63b7357 100644
--- a/drivers/genpd/mediatek/mt8173-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8173-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8183-pm-domains.h b/drivers/pmdomain/mediatek/mt8183-pm-domains.h
index 99de67fe5de8..99de67fe5de8 100644
--- a/drivers/genpd/mediatek/mt8183-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8183-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8186-pm-domains.h b/drivers/pmdomain/mediatek/mt8186-pm-domains.h
index fce86f79c505..fce86f79c505 100644
--- a/drivers/genpd/mediatek/mt8186-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8186-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8188-pm-domains.h b/drivers/pmdomain/mediatek/mt8188-pm-domains.h
index 0692cb444ed0..0692cb444ed0 100644
--- a/drivers/genpd/mediatek/mt8188-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8188-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8192-pm-domains.h b/drivers/pmdomain/mediatek/mt8192-pm-domains.h
index b97b2051920f..b97b2051920f 100644
--- a/drivers/genpd/mediatek/mt8192-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8192-pm-domains.h
diff --git a/drivers/genpd/mediatek/mt8195-pm-domains.h b/drivers/pmdomain/mediatek/mt8195-pm-domains.h
index d7387ea1b9c9..d7387ea1b9c9 100644
--- a/drivers/genpd/mediatek/mt8195-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8195-pm-domains.h
diff --git a/drivers/genpd/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index ee962804b830..ee962804b830 100644
--- a/drivers/genpd/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
diff --git a/drivers/genpd/mediatek/mtk-pm-domains.h b/drivers/pmdomain/mediatek/mtk-pm-domains.h
index 5ec53ee073c4..5ec53ee073c4 100644
--- a/drivers/genpd/mediatek/mtk-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.h
diff --git a/drivers/genpd/mediatek/mtk-scpsys.c b/drivers/pmdomain/mediatek/mtk-scpsys.c
index b374d01fdac7..b374d01fdac7 100644
--- a/drivers/genpd/mediatek/mtk-scpsys.c
+++ b/drivers/pmdomain/mediatek/mtk-scpsys.c
diff --git a/drivers/genpd/qcom/Makefile b/drivers/pmdomain/qcom/Makefile
index 403dfc5af095..403dfc5af095 100644
--- a/drivers/genpd/qcom/Makefile
+++ b/drivers/pmdomain/qcom/Makefile
diff --git a/drivers/genpd/qcom/cpr.c b/drivers/pmdomain/qcom/cpr.c
index 94a3f0977212..94a3f0977212 100644
--- a/drivers/genpd/qcom/cpr.c
+++ b/drivers/pmdomain/qcom/cpr.c
diff --git a/drivers/genpd/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index a87e336d5e33..a87e336d5e33 100644
--- a/drivers/genpd/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
diff --git a/drivers/genpd/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c
index 3135dd1dafe0..3135dd1dafe0 100644
--- a/drivers/genpd/qcom/rpmpd.c
+++ b/drivers/pmdomain/qcom/rpmpd.c
diff --git a/drivers/genpd/renesas/Makefile b/drivers/pmdomain/renesas/Makefile
index e306e396fc8c..e306e396fc8c 100644
--- a/drivers/genpd/renesas/Makefile
+++ b/drivers/pmdomain/renesas/Makefile
diff --git a/drivers/genpd/renesas/r8a7742-sysc.c b/drivers/pmdomain/renesas/r8a7742-sysc.c
index 219a675f83f4..219a675f83f4 100644
--- a/drivers/genpd/renesas/r8a7742-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7742-sysc.c
diff --git a/drivers/genpd/renesas/r8a7743-sysc.c b/drivers/pmdomain/renesas/r8a7743-sysc.c
index 4e2c0ab951b3..4e2c0ab951b3 100644
--- a/drivers/genpd/renesas/r8a7743-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7743-sysc.c
diff --git a/drivers/genpd/renesas/r8a7745-sysc.c b/drivers/pmdomain/renesas/r8a7745-sysc.c
index 865821a2f0c6..865821a2f0c6 100644
--- a/drivers/genpd/renesas/r8a7745-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7745-sysc.c
diff --git a/drivers/genpd/renesas/r8a77470-sysc.c b/drivers/pmdomain/renesas/r8a77470-sysc.c
index 1eeb8018df50..1eeb8018df50 100644
--- a/drivers/genpd/renesas/r8a77470-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77470-sysc.c
diff --git a/drivers/genpd/renesas/r8a774a1-sysc.c b/drivers/pmdomain/renesas/r8a774a1-sysc.c
index 38ac2c689ff0..38ac2c689ff0 100644
--- a/drivers/genpd/renesas/r8a774a1-sysc.c
+++ b/drivers/pmdomain/renesas/r8a774a1-sysc.c
diff --git a/drivers/genpd/renesas/r8a774b1-sysc.c b/drivers/pmdomain/renesas/r8a774b1-sysc.c
index 5f97ff26f3f8..5f97ff26f3f8 100644
--- a/drivers/genpd/renesas/r8a774b1-sysc.c
+++ b/drivers/pmdomain/renesas/r8a774b1-sysc.c
diff --git a/drivers/genpd/renesas/r8a774c0-sysc.c b/drivers/pmdomain/renesas/r8a774c0-sysc.c
index c1c216f7d073..c1c216f7d073 100644
--- a/drivers/genpd/renesas/r8a774c0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a774c0-sysc.c
diff --git a/drivers/genpd/renesas/r8a774e1-sysc.c b/drivers/pmdomain/renesas/r8a774e1-sysc.c
index 18449f746455..18449f746455 100644
--- a/drivers/genpd/renesas/r8a774e1-sysc.c
+++ b/drivers/pmdomain/renesas/r8a774e1-sysc.c
diff --git a/drivers/genpd/renesas/r8a7779-sysc.c b/drivers/pmdomain/renesas/r8a7779-sysc.c
index e24a7151d55f..e24a7151d55f 100644
--- a/drivers/genpd/renesas/r8a7779-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7779-sysc.c
diff --git a/drivers/genpd/renesas/r8a7790-sysc.c b/drivers/pmdomain/renesas/r8a7790-sysc.c
index b9afe7f6245b..b9afe7f6245b 100644
--- a/drivers/genpd/renesas/r8a7790-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7790-sysc.c
diff --git a/drivers/genpd/renesas/r8a7791-sysc.c b/drivers/pmdomain/renesas/r8a7791-sysc.c
index f00fa24522a3..f00fa24522a3 100644
--- a/drivers/genpd/renesas/r8a7791-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7791-sysc.c
diff --git a/drivers/genpd/renesas/r8a7792-sysc.c b/drivers/pmdomain/renesas/r8a7792-sysc.c
index 60aae242c43f..60aae242c43f 100644
--- a/drivers/genpd/renesas/r8a7792-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7792-sysc.c
diff --git a/drivers/genpd/renesas/r8a7794-sysc.c b/drivers/pmdomain/renesas/r8a7794-sysc.c
index 72ef4e85458f..72ef4e85458f 100644
--- a/drivers/genpd/renesas/r8a7794-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7794-sysc.c
diff --git a/drivers/genpd/renesas/r8a7795-sysc.c b/drivers/pmdomain/renesas/r8a7795-sysc.c
index cbe1ff0fc583..cbe1ff0fc583 100644
--- a/drivers/genpd/renesas/r8a7795-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7795-sysc.c
diff --git a/drivers/genpd/renesas/r8a7796-sysc.c b/drivers/pmdomain/renesas/r8a7796-sysc.c
index 471bd5b3b6ad..471bd5b3b6ad 100644
--- a/drivers/genpd/renesas/r8a7796-sysc.c
+++ b/drivers/pmdomain/renesas/r8a7796-sysc.c
diff --git a/drivers/genpd/renesas/r8a77965-sysc.c b/drivers/pmdomain/renesas/r8a77965-sysc.c
index ff0b0d116992..ff0b0d116992 100644
--- a/drivers/genpd/renesas/r8a77965-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77965-sysc.c
diff --git a/drivers/genpd/renesas/r8a77970-sysc.c b/drivers/pmdomain/renesas/r8a77970-sysc.c
index 706258250600..706258250600 100644
--- a/drivers/genpd/renesas/r8a77970-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77970-sysc.c
diff --git a/drivers/genpd/renesas/r8a77980-sysc.c b/drivers/pmdomain/renesas/r8a77980-sysc.c
index 39ca84a67daa..39ca84a67daa 100644
--- a/drivers/genpd/renesas/r8a77980-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77980-sysc.c
diff --git a/drivers/genpd/renesas/r8a77990-sysc.c b/drivers/pmdomain/renesas/r8a77990-sysc.c
index 9f92737dc352..9f92737dc352 100644
--- a/drivers/genpd/renesas/r8a77990-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77990-sysc.c
diff --git a/drivers/genpd/renesas/r8a77995-sysc.c b/drivers/pmdomain/renesas/r8a77995-sysc.c
index efcc67e3d76d..efcc67e3d76d 100644
--- a/drivers/genpd/renesas/r8a77995-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77995-sysc.c
diff --git a/drivers/genpd/renesas/r8a779a0-sysc.c b/drivers/pmdomain/renesas/r8a779a0-sysc.c
index 04f1bc322ae7..04f1bc322ae7 100644
--- a/drivers/genpd/renesas/r8a779a0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a779a0-sysc.c
diff --git a/drivers/genpd/renesas/r8a779f0-sysc.c b/drivers/pmdomain/renesas/r8a779f0-sysc.c
index 5602aa6bd7ed..5602aa6bd7ed 100644
--- a/drivers/genpd/renesas/r8a779f0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a779f0-sysc.c
diff --git a/drivers/genpd/renesas/r8a779g0-sysc.c b/drivers/pmdomain/renesas/r8a779g0-sysc.c
index b932eba1b804..b932eba1b804 100644
--- a/drivers/genpd/renesas/r8a779g0-sysc.c
+++ b/drivers/pmdomain/renesas/r8a779g0-sysc.c
diff --git a/drivers/genpd/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
index 9e5e6e077abc..9e5e6e077abc 100644
--- a/drivers/genpd/renesas/rcar-gen4-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
diff --git a/drivers/genpd/renesas/rcar-gen4-sysc.h b/drivers/pmdomain/renesas/rcar-gen4-sysc.h
index 388cfa8f8f9f..388cfa8f8f9f 100644
--- a/drivers/genpd/renesas/rcar-gen4-sysc.h
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.h
diff --git a/drivers/genpd/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index eed47696e825..eed47696e825 100644
--- a/drivers/genpd/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
diff --git a/drivers/genpd/renesas/rcar-sysc.h b/drivers/pmdomain/renesas/rcar-sysc.h
index 266c599a0a9b..266c599a0a9b 100644
--- a/drivers/genpd/renesas/rcar-sysc.h
+++ b/drivers/pmdomain/renesas/rcar-sysc.h
diff --git a/drivers/genpd/renesas/rmobile-sysc.c b/drivers/pmdomain/renesas/rmobile-sysc.c
index 912daadaa10d..912daadaa10d 100644
--- a/drivers/genpd/renesas/rmobile-sysc.c
+++ b/drivers/pmdomain/renesas/rmobile-sysc.c
diff --git a/drivers/genpd/rockchip/Makefile b/drivers/pmdomain/rockchip/Makefile
index 8fb9d88a3492..8fb9d88a3492 100644
--- a/drivers/genpd/rockchip/Makefile
+++ b/drivers/pmdomain/rockchip/Makefile
diff --git a/drivers/genpd/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c
index d5d3ecb38283..d5d3ecb38283 100644
--- a/drivers/genpd/rockchip/pm-domains.c
+++ b/drivers/pmdomain/rockchip/pm-domains.c
diff --git a/drivers/genpd/samsung/Makefile b/drivers/pmdomain/samsung/Makefile
index 397aa5908c1d..397aa5908c1d 100644
--- a/drivers/genpd/samsung/Makefile
+++ b/drivers/pmdomain/samsung/Makefile
diff --git a/drivers/genpd/samsung/exynos-pm-domains.c b/drivers/pmdomain/samsung/exynos-pm-domains.c
index 9b502e8751d1..9b502e8751d1 100644
--- a/drivers/genpd/samsung/exynos-pm-domains.c
+++ b/drivers/pmdomain/samsung/exynos-pm-domains.c
diff --git a/drivers/genpd/st/Makefile b/drivers/pmdomain/st/Makefile
index 8fa5f9855460..8fa5f9855460 100644
--- a/drivers/genpd/st/Makefile
+++ b/drivers/pmdomain/st/Makefile
diff --git a/drivers/genpd/st/ste-ux500-pm-domain.c b/drivers/pmdomain/st/ste-ux500-pm-domain.c
index 3d4f111ed156..3d4f111ed156 100644
--- a/drivers/genpd/st/ste-ux500-pm-domain.c
+++ b/drivers/pmdomain/st/ste-ux500-pm-domain.c
diff --git a/drivers/genpd/starfive/Makefile b/drivers/pmdomain/starfive/Makefile
index 975bba2a29a9..975bba2a29a9 100644
--- a/drivers/genpd/starfive/Makefile
+++ b/drivers/pmdomain/starfive/Makefile
diff --git a/drivers/genpd/starfive/jh71xx-pmu.c b/drivers/pmdomain/starfive/jh71xx-pmu.c
index 7d5f50d71c0d..7d5f50d71c0d 100644
--- a/drivers/genpd/starfive/jh71xx-pmu.c
+++ b/drivers/pmdomain/starfive/jh71xx-pmu.c
diff --git a/drivers/genpd/sunxi/Makefile b/drivers/pmdomain/sunxi/Makefile
index ec1d7a2fb21d..ec1d7a2fb21d 100644
--- a/drivers/genpd/sunxi/Makefile
+++ b/drivers/pmdomain/sunxi/Makefile
diff --git a/drivers/genpd/sunxi/sun20i-ppu.c b/drivers/pmdomain/sunxi/sun20i-ppu.c
index 8700f9dd5f75..8700f9dd5f75 100644
--- a/drivers/genpd/sunxi/sun20i-ppu.c
+++ b/drivers/pmdomain/sunxi/sun20i-ppu.c
diff --git a/drivers/genpd/tegra/Makefile b/drivers/pmdomain/tegra/Makefile
index ec8acfd2c77c..ec8acfd2c77c 100644
--- a/drivers/genpd/tegra/Makefile
+++ b/drivers/pmdomain/tegra/Makefile
diff --git a/drivers/genpd/tegra/powergate-bpmp.c b/drivers/pmdomain/tegra/powergate-bpmp.c
index 179ed895c279..179ed895c279 100644
--- a/drivers/genpd/tegra/powergate-bpmp.c
+++ b/drivers/pmdomain/tegra/powergate-bpmp.c
diff --git a/drivers/genpd/ti/Makefile b/drivers/pmdomain/ti/Makefile
index 69580afbb436..69580afbb436 100644
--- a/drivers/genpd/ti/Makefile
+++ b/drivers/pmdomain/ti/Makefile
diff --git a/drivers/genpd/ti/omap_prm.c b/drivers/pmdomain/ti/omap_prm.c
index c2feae3a634c..c2feae3a634c 100644
--- a/drivers/genpd/ti/omap_prm.c
+++ b/drivers/pmdomain/ti/omap_prm.c
diff --git a/drivers/genpd/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
index 34645104fe45..34645104fe45 100644
--- a/drivers/genpd/ti/ti_sci_pm_domains.c
+++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
diff --git a/drivers/genpd/xilinx/Makefile b/drivers/pmdomain/xilinx/Makefile
index a706ab699cfa..a706ab699cfa 100644
--- a/drivers/genpd/xilinx/Makefile
+++ b/drivers/pmdomain/xilinx/Makefile
diff --git a/drivers/genpd/xilinx/zynqmp-pm-domains.c b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
index 69d03ad4cf1e..69d03ad4cf1e 100644
--- a/drivers/genpd/xilinx/zynqmp-pm-domains.c
+++ b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 59e1ebb7842e..411e00b255d6 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -300,7 +300,7 @@ config NVMEM_REBOOT_MODE
config POWER_MLXBF
tristate "Mellanox BlueField power handling driver"
- depends on (GPIO_MLXBF2 && ACPI)
+ depends on (GPIO_MLXBF2 || GPIO_MLXBF3) && ACPI
help
This driver supports reset or low power mode handling for Mellanox BlueField.
diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c
index 12dedf841a44..de35d24bb7ef 100644
--- a/drivers/power/reset/pwr-mlxbf.c
+++ b/drivers/power/reset/pwr-mlxbf.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only or BSD-3-Clause
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/*
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 447ffdacddf9..17064d7b19f6 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -121,7 +121,7 @@ static int vexpress_reset_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
dev_set_drvdata(&pdev->dev, regmap);
- switch ((enum vexpress_reset_func)match->data) {
+ switch ((uintptr_t)match->data) {
case FUNC_SHUTDOWN:
vexpress_power_off_device = &pdev->dev;
pm_power_off = vexpress_power_off;
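A small sketch of the cast pattern used in the vexpress hunk: OF match data is a void pointer, so an integer constant stored there is recovered through uintptr_t rather than a direct pointer-to-enum cast, which avoids truncation/conversion warnings. All demo_* names are hypothetical.

#include <linux/of.h>
#include <linux/types.h>

enum demo_func { DEMO_SHUTDOWN, DEMO_REBOOT };

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-shutdown", .data = (void *)DEMO_SHUTDOWN },
        { .compatible = "vendor,demo-reboot",   .data = (void *)DEMO_REBOOT },
        { }
};

static void demo_handle(const struct of_device_id *match)
{
        /* recover the integer via uintptr_t, not the enum type */
        switch ((uintptr_t)match->data) {
        case DEMO_SHUTDOWN:
                /* install poweroff handler */
                break;
        case DEMO_REBOOT:
                /* install restart handler */
                break;
        }
}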
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 663a1c423806..a61bb1283e19 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -769,6 +769,7 @@ config BATTERY_RT5033
config CHARGER_RT5033
tristate "RT5033 battery charger support"
depends on MFD_RT5033
+ depends on EXTCON || !EXTCON
help
This adds support for battery charger in Richtek RT5033 PMIC.
The device supports pre-charge mode, fast charge mode and
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 6f83e99d2eb7..ce36d6ca3422 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -115,7 +115,6 @@ struct ab8500_btemp {
static enum power_supply_property ab8500_btemp_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_TEMP,
};
@@ -532,12 +531,6 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
else
val->intval = 1;
break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- if (di->bm->bi)
- val->intval = di->bm->bi->technology;
- else
- val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
- break;
case POWER_SUPPLY_PROP_TEMP:
val->intval = ab8500_btemp_get_temp(di);
break;
@@ -662,7 +655,7 @@ static char *supply_interface[] = {
static const struct power_supply_desc ab8500_btemp_desc = {
.name = "ab8500_btemp",
- .type = POWER_SUPPLY_TYPE_BATTERY,
+ .type = POWER_SUPPLY_TYPE_UNKNOWN,
.properties = ab8500_btemp_props,
.num_properties = ARRAY_SIZE(ab8500_btemp_props),
.get_property = ab8500_btemp_get_property,
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index ea4ad61d4c7e..2205ea0834a6 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -1720,7 +1720,7 @@ static char *supply_interface[] = {
static const struct power_supply_desc ab8500_chargalg_desc = {
.name = "ab8500_chargalg",
- .type = POWER_SUPPLY_TYPE_BATTERY,
+ .type = POWER_SUPPLY_TYPE_UNKNOWN,
.properties = ab8500_chargalg_props,
.num_properties = ARRAY_SIZE(ab8500_chargalg_props),
.get_property = ab8500_chargalg_get_property,
diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
index f27dae5043f5..a9641bd3d8cf 100644
--- a/drivers/power/supply/mt6370-charger.c
+++ b/drivers/power/supply/mt6370-charger.c
@@ -324,7 +324,7 @@ static int mt6370_chg_toggle_cfo(struct mt6370_priv *priv)
if (fl_strobe) {
dev_err(priv->dev, "Flash led is still in strobe mode\n");
- return ret;
+ return -EINVAL;
}
/* cfo off */
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 4aa466c945e2..0b69fb7bafd8 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1309,8 +1309,8 @@ static int psy_register_thermal(struct power_supply *psy)
struct thermal_zone_params tzp = {
.no_hwmon = IS_ENABLED(CONFIG_POWER_SUPPLY_HWMON)
};
- psy->tzd = thermal_zone_device_register(psy->desc->name,
- 0, 0, psy, &psy_tzd_ops, &tzp, 0, 0);
+ psy->tzd = thermal_tripless_zone_device_register(psy->desc->name,
+ psy, &psy_tzd_ops, &tzp);
if (IS_ERR(psy->tzd))
return PTR_ERR(psy->tzd);
ret = thermal_zone_device_enable(psy->tzd);
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 06e5b6b0e255..d483a81560ab 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -482,6 +482,13 @@ int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env)
if (ret)
return ret;
+ /*
+ * Kernel generates KOBJ_REMOVE uevent in device removal path, after
+ * resources have been freed. Exit early to avoid use-after-free.
+ */
+ if (psy->removing)
+ return 0;
+
prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!prop_buf)
return -ENOMEM;
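A sketch of the guard added above, with hypothetical demo_* names: a "removing" flag set early in teardown lets a late callback (here, a uevent handler invoked for KOBJ_REMOVE) bail out before touching resources that are already being freed.

#include <linux/kobject.h>

struct demo_supply {
        bool removing;
        const char *prop_buf;
};

static int demo_uevent(struct demo_supply *psy, struct kobj_uevent_env *env)
{
        /* KOBJ_REMOVE can arrive after resources have been released */
        if (psy->removing)
                return 0;

        return add_uevent_var(env, "DEMO_PROP=%s", psy->prop_buf);
}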
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 8328bcea1a29..f64daf5a41d9 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -1045,6 +1045,13 @@ static void rk817_charging_monitor(struct work_struct *work)
queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000));
}
+static void rk817_cleanup_node(void *data)
+{
+ struct device_node *node = data;
+
+ of_node_put(node);
+}
+
static int rk817_charger_probe(struct platform_device *pdev)
{
struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
@@ -1061,11 +1068,13 @@ static int rk817_charger_probe(struct platform_device *pdev)
if (!node)
return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, rk817_cleanup_node, node);
+ if (ret)
+ return ret;
+
charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
- if (!charger) {
- of_node_put(node);
+ if (!charger)
return -ENOMEM;
- }
charger->rk808 = rk808;
@@ -1211,3 +1220,4 @@ MODULE_DESCRIPTION("Battery power supply driver for RK817 PMIC");
MODULE_AUTHOR("Maya Matuszczyk <maccraft123mc@gmail.com>");
MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rk817-charger");
diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
index 683adb18253d..fdfdc83ab045 100644
--- a/drivers/power/supply/rt9467-charger.c
+++ b/drivers/power/supply/rt9467-charger.c
@@ -598,8 +598,8 @@ static int rt9467_run_aicl(struct rt9467_chg_data *data)
reinit_completion(&data->aicl_done);
ret = wait_for_completion_timeout(&data->aicl_done, msecs_to_jiffies(3500));
- if (ret)
- return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
ret = rt9467_get_value_from_ranges(data, F_IAICR, RT9467_RANGE_IAICR, &aicr_get);
if (ret) {
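A sketch of the return-value convention the rt9467 fix restores: wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise, so a zero result must be mapped to an error such as -ETIMEDOUT instead of being propagated as success.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int demo_wait(struct completion *done)
{
        unsigned long left;

        left = wait_for_completion_timeout(done, msecs_to_jiffies(3500));
        if (left == 0)
                return -ETIMEDOUT;      /* timed out, not "success" */

        return 0;       /* completed with time to spare */
}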
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index 954feba6600b..7970843a4f48 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -384,7 +384,8 @@ static int ucs1002_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_USB_TYPE:
return ucs1002_get_usb_type(info, val);
case POWER_SUPPLY_PROP_HEALTH:
- return val->intval = info->health;
+ val->intval = info->health;
+ return 0;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = info->present;
return 0;
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 5c2e6d5eea2a..40a2cc649c79 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -658,8 +658,6 @@ static struct rapl_primitive_info rpi_msr[NR_RAPL_PRIMITIVES] = {
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PL2_CLAMP] = PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- [PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
- RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
[TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
[TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
@@ -1458,7 +1456,7 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
}
}
- if (rapl_read_pl_data(rd, i, PL_ENABLE, false, &val64))
+ if (rapl_read_pl_data(rd, i, PL_LIMIT, false, &val64))
rd->rpl[i].name = NULL;
}
}
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 20a974ced8d6..a7a6947ab4bc 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -3998,7 +3998,6 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
return 0;
out:
- ptp_ocp_dev_release(&bp->dev);
put_device(&bp->dev);
return err;
}
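A sketch of the refcounting rule behind the ptp_ocp fix above: once device_initialize() has run, put_device() invokes the release callback when the last reference drops, so also calling the release function by hand frees the object twice. demo_* names are hypothetical.

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_obj {
        struct device dev;
};

static void demo_release(struct device *dev)
{
        kfree(container_of(dev, struct demo_obj, dev));
}

static int demo_init_fail_path(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return -ENOMEM;

        device_initialize(&obj->dev);
        obj->dev.release = demo_release;

        /* error path: one put_device() is enough, it frees obj via demo_release() */
        put_device(&obj->dev);
        return -EIO;
}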
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 6210babb0741..8ebcddf91f7b 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -505,7 +505,7 @@ config PWM_ROCKCHIP
config PWM_RZ_MTU3
tristate "Renesas RZ/G2L MTU3a PWM Timer support"
- depends on RZ_MTU3 || COMPILE_TEST
+ depends on RZ_MTU3
depends on HAS_IOMEM
help
This driver exposes the MTU3a PWM Timer controller found in Renesas
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 3dacceaef4a9..dc66e3405bf5 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -8,8 +8,8 @@
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pwm.h>
-#include <linux/radix-tree.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/err.h>
@@ -127,28 +127,28 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
}
struct pwm_device *
-of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
+of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
- if (pc->of_pwm_n_cells < 2)
+ if (chip->of_pwm_n_cells < 2)
return ERR_PTR(-EINVAL);
/* flags in the third cell are optional */
if (args->args_count < 2)
return ERR_PTR(-EINVAL);
- if (args->args[0] >= pc->npwm)
+ if (args->args[0] >= chip->npwm)
return ERR_PTR(-EINVAL);
- pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ pwm = pwm_request_from_chip(chip, args->args[0], NULL);
if (IS_ERR(pwm))
return pwm;
pwm->args.period = args->args[1];
pwm->args.polarity = PWM_POLARITY_NORMAL;
- if (pc->of_pwm_n_cells >= 3) {
+ if (chip->of_pwm_n_cells >= 3) {
if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED)
pwm->args.polarity = PWM_POLARITY_INVERSED;
}
@@ -158,18 +158,18 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
struct pwm_device *
-of_pwm_single_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
- if (pc->of_pwm_n_cells < 1)
+ if (chip->of_pwm_n_cells < 1)
return ERR_PTR(-EINVAL);
/* validate that one cell is specified, optionally with flags */
if (args->args_count != 1 && args->args_count != 2)
return ERR_PTR(-EINVAL);
- pwm = pwm_request_from_chip(pc, 0, NULL);
+ pwm = pwm_request_from_chip(chip, 0, NULL);
if (IS_ERR(pwm))
return pwm;
@@ -312,22 +312,19 @@ EXPORT_SYMBOL_GPL(pwmchip_add);
* pwmchip_remove() - remove a PWM chip
* @chip: the PWM chip to remove
*
- * Removes a PWM chip. This function may return busy if the PWM chip provides
- * a PWM device that is still requested.
- *
- * Returns: 0 on success or a negative error code on failure.
+ * Removes a PWM chip.
*/
void pwmchip_remove(struct pwm_chip *chip)
{
pwmchip_sysfs_unexport(chip);
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_remove(chip);
+
mutex_lock(&pwm_lock);
list_del_init(&chip->list);
- if (IS_ENABLED(CONFIG_OF))
- of_pwmchip_remove(chip);
-
free_pwms(chip);
mutex_unlock(&pwm_lock);
@@ -692,7 +689,7 @@ static struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
struct pwm_device *pwm = NULL;
struct of_phandle_args args;
struct device_link *dl;
- struct pwm_chip *pc;
+ struct pwm_chip *chip;
int index = 0;
int err;
@@ -709,16 +706,16 @@ static struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
return ERR_PTR(err);
}
- pc = fwnode_to_pwmchip(of_fwnode_handle(args.np));
- if (IS_ERR(pc)) {
- if (PTR_ERR(pc) != -EPROBE_DEFER)
+ chip = fwnode_to_pwmchip(of_fwnode_handle(args.np));
+ if (IS_ERR(chip)) {
+ if (PTR_ERR(chip) != -EPROBE_DEFER)
pr_err("%s(): PWM chip not found\n", __func__);
- pwm = ERR_CAST(pc);
+ pwm = ERR_CAST(chip);
goto put;
}
- pwm = pc->of_xlate(pc, &args);
+ pwm = chip->of_xlate(chip, &args);
if (IS_ERR(pwm))
goto put;
diff --git a/drivers/pwm/pwm-apple.c b/drivers/pwm/pwm-apple.c
index a38a62edd713..8e7d67fb5fbe 100644
--- a/drivers/pwm/pwm-apple.c
+++ b/drivers/pwm/pwm-apple.c
@@ -12,6 +12,7 @@
* - When APPLE_PWM_CTRL is set to 0, the output is constant low
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 96a709a9d49a..e271d920151e 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/mfd/atmel-hlcdc.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -38,11 +39,11 @@ static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
return container_of(chip, struct atmel_hlcdc_pwm, chip);
}
-static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
+static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
- struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
+ struct atmel_hlcdc *hlcdc = atmel->hlcdc;
unsigned int status;
int ret;
@@ -54,7 +55,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
u32 pwmcfg;
int pres;
- if (!chip->errata || !chip->errata->slow_clk_erratum) {
+ if (!atmel->errata || !atmel->errata->slow_clk_erratum) {
clk_freq = clk_get_rate(new_clk);
if (!clk_freq)
return -EINVAL;
@@ -64,7 +65,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
}
/* Errata: cannot use slow clk on some IP revisions */
- if ((chip->errata && chip->errata->slow_clk_erratum) ||
+ if ((atmel->errata && atmel->errata->slow_clk_erratum) ||
clk_period_ns > state->period) {
new_clk = hlcdc->sys_clk;
clk_freq = clk_get_rate(new_clk);
@@ -77,8 +78,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) {
/* Errata: cannot divide by 1 on some IP revisions */
- if (!pres && chip->errata &&
- chip->errata->div1_clk_erratum)
+ if (!pres && atmel->errata &&
+ atmel->errata->div1_clk_erratum)
continue;
if ((clk_period_ns << pres) >= state->period)
@@ -90,7 +91,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
pwmcfg = ATMEL_HLCDC_PWMPS(pres);
- if (new_clk != chip->cur_clk) {
+ if (new_clk != atmel->cur_clk) {
u32 gencfg = 0;
int ret;
@@ -98,8 +99,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
if (ret)
return ret;
- clk_disable_unprepare(chip->cur_clk);
- chip->cur_clk = new_clk;
+ clk_disable_unprepare(atmel->cur_clk);
+ atmel->cur_clk = new_clk;
if (new_clk == hlcdc->sys_clk)
gencfg = ATMEL_HLCDC_CLKPWMSEL;
@@ -160,8 +161,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
if (ret)
return ret;
- clk_disable_unprepare(chip->cur_clk);
- chip->cur_clk = NULL;
+ clk_disable_unprepare(atmel->cur_clk);
+ atmel->cur_clk = NULL;
}
return 0;
@@ -183,31 +184,32 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_pwm_suspend(struct device *dev)
{
- struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
+ struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
/* Keep the periph clock enabled if the PWM is still running. */
- if (pwm_is_enabled(&chip->chip.pwms[0]))
- clk_disable_unprepare(chip->hlcdc->periph_clk);
+ if (pwm_is_enabled(&atmel->chip.pwms[0]))
+ clk_disable_unprepare(atmel->hlcdc->periph_clk);
return 0;
}
static int atmel_hlcdc_pwm_resume(struct device *dev)
{
- struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
+ struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
struct pwm_state state;
int ret;
- pwm_get_state(&chip->chip.pwms[0], &state);
+ pwm_get_state(&atmel->chip.pwms[0], &state);
/* Re-enable the periph clock it was stopped during suspend. */
if (!state.enabled) {
- ret = clk_prepare_enable(chip->hlcdc->periph_clk);
+ ret = clk_prepare_enable(atmel->hlcdc->periph_clk);
if (ret)
return ret;
}
- return atmel_hlcdc_pwm_apply(&chip->chip, &chip->chip.pwms[0], &state);
+ return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0],
+ &state);
}
#endif
@@ -244,14 +246,14 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct device *dev = &pdev->dev;
- struct atmel_hlcdc_pwm *chip;
+ struct atmel_hlcdc_pwm *atmel;
struct atmel_hlcdc *hlcdc;
int ret;
hlcdc = dev_get_drvdata(dev->parent);
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
+ atmel = devm_kzalloc(dev, sizeof(*atmel), GFP_KERNEL);
+ if (!atmel)
return -ENOMEM;
ret = clk_prepare_enable(hlcdc->periph_clk);
@@ -260,31 +262,31 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
match = of_match_node(atmel_hlcdc_dt_ids, dev->parent->of_node);
if (match)
- chip->errata = match->data;
+ atmel->errata = match->data;
- chip->hlcdc = hlcdc;
- chip->chip.ops = &atmel_hlcdc_pwm_ops;
- chip->chip.dev = dev;
- chip->chip.npwm = 1;
+ atmel->hlcdc = hlcdc;
+ atmel->chip.ops = &atmel_hlcdc_pwm_ops;
+ atmel->chip.dev = dev;
+ atmel->chip.npwm = 1;
- ret = pwmchip_add(&chip->chip);
+ ret = pwmchip_add(&atmel->chip);
if (ret) {
clk_disable_unprepare(hlcdc->periph_clk);
return ret;
}
- platform_set_drvdata(pdev, chip);
+ platform_set_drvdata(pdev, atmel);
return 0;
}
static void atmel_hlcdc_pwm_remove(struct platform_device *pdev)
{
- struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev);
+ struct atmel_hlcdc_pwm *atmel = platform_get_drvdata(pdev);
- pwmchip_remove(&chip->chip);
+ pwmchip_remove(&atmel->chip);
- clk_disable_unprepare(chip->hlcdc->periph_clk);
+ clk_disable_unprepare(atmel->hlcdc->periph_clk);
}
static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = {
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 4a116dc44f6e..c00dd37c5fbd 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -19,8 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <soc/at91/atmel_tcb.h>
@@ -34,7 +33,6 @@
ATMEL_TC_BEEVT | ATMEL_TC_BSWTRG)
struct atmel_tcb_pwm_device {
- enum pwm_polarity polarity; /* PWM polarity */
unsigned div; /* PWM clock divider */
unsigned duty; /* PWM duty expressed in clk cycles */
unsigned period; /* PWM period expressed in clk cycles */
@@ -57,7 +55,7 @@ struct atmel_tcb_pwm_chip {
struct clk *clk;
struct clk *gclk;
struct clk *slow_clk;
- struct atmel_tcb_pwm_device *pwms[NPWM];
+ struct atmel_tcb_pwm_device pwms[NPWM];
struct atmel_tcb_channel bkup;
};
@@ -68,37 +66,18 @@ static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
return container_of(chip, struct atmel_tcb_pwm_chip, chip);
}
-static int atmel_tcb_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
-
- tcbpwm->polarity = polarity;
-
- return 0;
-}
-
static int atmel_tcb_pwm_request(struct pwm_chip *chip,
struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm;
+ struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
unsigned cmr;
int ret;
- tcbpwm = devm_kzalloc(chip->dev, sizeof(*tcbpwm), GFP_KERNEL);
- if (!tcbpwm)
- return -ENOMEM;
-
ret = clk_prepare_enable(tcbpwmc->clk);
- if (ret) {
- devm_kfree(chip->dev, tcbpwm);
+ if (ret)
return ret;
- }
- tcbpwm->polarity = PWM_POLARITY_NORMAL;
tcbpwm->duty = 0;
tcbpwm->period = 0;
tcbpwm->div = 0;
@@ -131,27 +110,22 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
spin_unlock(&tcbpwmc->lock);
- tcbpwmc->pwms[pwm->hwpwm] = tcbpwm;
-
return 0;
}
static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
clk_disable_unprepare(tcbpwmc->clk);
- tcbpwmc->pwms[pwm->hwpwm] = NULL;
- devm_kfree(chip->dev, tcbpwm);
}
-static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
+ struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
unsigned cmr;
- enum pwm_polarity polarity = tcbpwm->polarity;
/*
* If duty is 0 the timer will be stopped and we have to
@@ -203,12 +177,12 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
spin_unlock(&tcbpwmc->lock);
}
-static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
+ struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
u32 cmr;
- enum pwm_polarity polarity = tcbpwm->polarity;
/*
* If duty is 0 the timer will be stopped and we have to
@@ -291,7 +265,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
+ struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
struct atmel_tcb_pwm_device *atcbpwm = NULL;
int i = 0;
int slowclk = 0;
@@ -338,9 +312,9 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
period = div_u64(period_ns, min);
if (pwm->hwpwm == 0)
- atcbpwm = tcbpwmc->pwms[1];
+ atcbpwm = &tcbpwmc->pwms[1];
else
- atcbpwm = tcbpwmc->pwms[0];
+ atcbpwm = &tcbpwmc->pwms[0];
/*
* PWM devices provided by the TCB driver are grouped by 2.
@@ -371,11 +345,8 @@ static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_cycle, period;
int ret;
- /* This function only sets a flag in driver data */
- atmel_tcb_pwm_set_polarity(chip, pwm, state->polarity);
-
if (!state->enabled) {
- atmel_tcb_pwm_disable(chip, pwm);
+ atmel_tcb_pwm_disable(chip, pwm, state->polarity);
return 0;
}
@@ -386,7 +357,7 @@ static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret)
return ret;
- return atmel_tcb_pwm_enable(chip, pwm);
+ return atmel_tcb_pwm_enable(chip, pwm, state->polarity);
}
static const struct pwm_ops atmel_tcb_pwm_ops = {
@@ -422,13 +393,14 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
struct atmel_tcb_pwm_chip *tcbpwm;
const struct atmel_tcb_config *config;
struct device_node *np = pdev->dev.of_node;
- struct regmap *regmap;
- struct clk *clk, *gclk = NULL;
- struct clk *slow_clk;
char clk_name[] = "t0_clk";
int err;
int channel;
+ tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
+ if (tcbpwm == NULL)
+ return -ENOMEM;
+
err = of_property_read_u32(np, "reg", &channel);
if (err < 0) {
dev_err(&pdev->dev,
@@ -437,49 +409,43 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
return err;
}
- regmap = syscon_node_to_regmap(np->parent);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ tcbpwm->regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(tcbpwm->regmap))
+ return PTR_ERR(tcbpwm->regmap);
- slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
- if (IS_ERR(slow_clk))
- return PTR_ERR(slow_clk);
+ tcbpwm->slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
+ if (IS_ERR(tcbpwm->slow_clk))
+ return PTR_ERR(tcbpwm->slow_clk);
clk_name[1] += channel;
- clk = of_clk_get_by_name(np->parent, clk_name);
- if (IS_ERR(clk))
- clk = of_clk_get_by_name(np->parent, "t0_clk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ tcbpwm->clk = of_clk_get_by_name(np->parent, clk_name);
+ if (IS_ERR(tcbpwm->clk))
+ tcbpwm->clk = of_clk_get_by_name(np->parent, "t0_clk");
+ if (IS_ERR(tcbpwm->clk)) {
+ err = PTR_ERR(tcbpwm->clk);
+ goto err_slow_clk;
+ }
match = of_match_node(atmel_tcb_of_match, np->parent);
config = match->data;
if (config->has_gclk) {
- gclk = of_clk_get_by_name(np->parent, "gclk");
- if (IS_ERR(gclk))
- return PTR_ERR(gclk);
- }
-
- tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
- if (tcbpwm == NULL) {
- err = -ENOMEM;
- goto err_slow_clk;
+ tcbpwm->gclk = of_clk_get_by_name(np->parent, "gclk");
+ if (IS_ERR(tcbpwm->gclk)) {
+ err = PTR_ERR(tcbpwm->gclk);
+ goto err_clk;
+ }
}
tcbpwm->chip.dev = &pdev->dev;
tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
tcbpwm->chip.npwm = NPWM;
tcbpwm->channel = channel;
- tcbpwm->regmap = regmap;
- tcbpwm->clk = clk;
- tcbpwm->gclk = gclk;
- tcbpwm->slow_clk = slow_clk;
tcbpwm->width = config->counter_width;
- err = clk_prepare_enable(slow_clk);
+ err = clk_prepare_enable(tcbpwm->slow_clk);
if (err)
- goto err_slow_clk;
+ goto err_gclk;
spin_lock_init(&tcbpwm->lock);
@@ -494,8 +460,14 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
err_disable_clk:
clk_disable_unprepare(tcbpwm->slow_clk);
+err_gclk:
+ clk_put(tcbpwm->gclk);
+
+err_clk:
+ clk_put(tcbpwm->clk);
+
err_slow_clk:
- clk_put(slow_clk);
+ clk_put(tcbpwm->slow_clk);
return err;
}
@@ -507,8 +479,9 @@ static void atmel_tcb_pwm_remove(struct platform_device *pdev)
pwmchip_remove(&tcbpwm->chip);
clk_disable_unprepare(tcbpwm->slow_clk);
- clk_put(tcbpwm->slow_clk);
+ clk_put(tcbpwm->gclk);
clk_put(tcbpwm->clk);
+ clk_put(tcbpwm->slow_clk);
}
static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 5f7d286871cf..1f73325d1bea 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -25,7 +25,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -36,7 +35,7 @@
#define PWM_SR 0x0C
#define PWM_ISR 0x1C
/* Bit field in SR */
-#define PWM_SR_ALL_CH_ON 0x0F
+#define PWM_SR_ALL_CH_MASK 0x0F
/* The following register is PWM channel related registers */
#define PWM_CH_REG_OFFSET 0x200
@@ -464,6 +463,42 @@ static const struct of_device_id atmel_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
+static int atmel_pwm_enable_clk_if_on(struct atmel_pwm_chip *atmel_pwm, bool on)
+{
+ unsigned int i, cnt = 0;
+ unsigned long sr;
+ int ret = 0;
+
+ sr = atmel_pwm_readl(atmel_pwm, PWM_SR) & PWM_SR_ALL_CH_MASK;
+ if (!sr)
+ return 0;
+
+ cnt = bitmap_weight(&sr, atmel_pwm->chip.npwm);
+
+ if (!on)
+ goto disable_clk;
+
+ for (i = 0; i < cnt; i++) {
+ ret = clk_enable(atmel_pwm->clk);
+ if (ret) {
+ dev_err(atmel_pwm->chip.dev,
+ "failed to enable clock for pwm %pe\n",
+ ERR_PTR(ret));
+
+ cnt = i;
+ goto disable_clk;
+ }
+ }
+
+ return 0;
+
+disable_clk:
+ while (cnt--)
+ clk_disable(atmel_pwm->clk);
+
+ return ret;
+}
+
static int atmel_pwm_probe(struct platform_device *pdev)
{
struct atmel_pwm_chip *atmel_pwm;
@@ -482,51 +517,39 @@ static int atmel_pwm_probe(struct platform_device *pdev)
if (IS_ERR(atmel_pwm->base))
return PTR_ERR(atmel_pwm->base);
- atmel_pwm->clk = devm_clk_get(&pdev->dev, NULL);
+ atmel_pwm->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(atmel_pwm->clk))
- return PTR_ERR(atmel_pwm->clk);
-
- ret = clk_prepare(atmel_pwm->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to prepare PWM clock\n");
- return ret;
- }
+ return dev_err_probe(&pdev->dev, PTR_ERR(atmel_pwm->clk),
+ "failed to get prepared PWM clock\n");
atmel_pwm->chip.dev = &pdev->dev;
atmel_pwm->chip.ops = &atmel_pwm_ops;
atmel_pwm->chip.npwm = 4;
- ret = pwmchip_add(&atmel_pwm->chip);
+ ret = atmel_pwm_enable_clk_if_on(atmel_pwm, true);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_pwmchip_add(&pdev->dev, &atmel_pwm->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to add PWM chip %d\n", ret);
- goto unprepare_clk;
+ dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
+ goto disable_clk;
}
- platform_set_drvdata(pdev, atmel_pwm);
+ return 0;
- return ret;
+disable_clk:
+ atmel_pwm_enable_clk_if_on(atmel_pwm, false);
-unprepare_clk:
- clk_unprepare(atmel_pwm->clk);
return ret;
}
-static void atmel_pwm_remove(struct platform_device *pdev)
-{
- struct atmel_pwm_chip *atmel_pwm = platform_get_drvdata(pdev);
-
- pwmchip_remove(&atmel_pwm->chip);
-
- clk_unprepare(atmel_pwm->clk);
-}
-
static struct platform_driver atmel_pwm_driver = {
.driver = {
.name = "atmel-pwm",
.of_match_table = of_match_ptr(atmel_pwm_dt_ids),
},
.probe = atmel_pwm_probe,
- .remove_new = atmel_pwm_remove,
};
module_platform_driver(atmel_pwm_driver);
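A sketch of the devm clock helper the pwm-atmel conversion leans on: devm_clk_get_prepared() hands back a clock that is already prepared and is unprepared automatically on driver detach, so the explicit clk_prepare()/clk_unprepare() calls and the remove callback can go away. demo_probe() is hypothetical.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        struct clk *clk;

        clk = devm_clk_get_prepared(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "failed to get prepared clock\n");

        /* only clk_enable()/clk_disable() are needed at runtime now */
        return clk_enable(clk);
}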
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 4fa6e249e4cf..e5b00cc9f7a7 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -61,9 +61,9 @@ struct kona_pwmc {
struct clk *clk;
};
-static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *_chip)
+static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *chip)
{
- return container_of(_chip, struct kona_pwmc, chip);
+ return container_of(chip, struct kona_pwmc, chip);
}
/*
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 0c5992a046b2..0971c666afd1 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index 4703b4a0b6e4..b9f063dc6b5f 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -34,9 +34,9 @@ struct crystalcove_pwm {
struct regmap *regmap;
};
-static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc)
+static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *chip)
{
- return container_of(pc, struct crystalcove_pwm, chip);
+ return container_of(chip, struct crystalcove_pwm, chip);
}
static int crc_pwm_calc_clk_div(int period_ns)
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 74e863aa1d8d..baaac0c33aa0 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
@@ -37,9 +38,9 @@ struct cros_ec_pwm {
u16 duty_cycle;
};
-static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *c)
+static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *chip)
{
- return container_of(c, struct cros_ec_pwm_device, chip);
+ return container_of(chip, struct cros_ec_pwm_device, chip);
}
static int cros_ec_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -218,14 +219,14 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
}
static struct pwm_device *
-cros_ec_pwm_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+cros_ec_pwm_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
- if (args->args[0] >= pc->npwm)
+ if (args->args[0] >= chip->npwm)
return ERR_PTR(-EINVAL);
- pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ pwm = pwm_request_from_chip(chip, args->args[0], NULL);
if (IS_ERR(pwm))
return pwm;
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 5caadbd6194e..b7c6045c5d08 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -11,8 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index b95df1a96127..f7ba6fe9a349 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/reset.h>
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index 1f2eb1c8ff6c..0651983bed19 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 3b7067f6cd0d..ef1293f2a897 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -15,7 +15,7 @@
#include <linux/mfd/ingenic-tcu.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 35675e4058c6..4b133a17f4be 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/mfd/lp3943.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -24,9 +25,9 @@ struct lp3943_pwm {
struct lp3943_platform_data *pdata;
};
-static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *_chip)
+static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *chip)
{
- return container_of(_chip, struct lp3943_pwm, chip);
+ return container_of(chip, struct lp3943_pwm, chip);
}
static struct lp3943_pwm_map *
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index b9bf5b366f4b..7a19a840bca5 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -22,6 +22,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -366,30 +367,21 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
if (IS_ERR(lpc18xx_pwm->base))
return PTR_ERR(lpc18xx_pwm->base);
- lpc18xx_pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+ lpc18xx_pwm->pwm_clk = devm_clk_get_enabled(&pdev->dev, "pwm");
if (IS_ERR(lpc18xx_pwm->pwm_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(lpc18xx_pwm->pwm_clk),
"failed to get pwm clock\n");
- ret = clk_prepare_enable(lpc18xx_pwm->pwm_clk);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
- "could not prepare or enable pwm clock\n");
-
lpc18xx_pwm->clk_rate = clk_get_rate(lpc18xx_pwm->pwm_clk);
- if (!lpc18xx_pwm->clk_rate) {
- ret = dev_err_probe(&pdev->dev,
- -EINVAL, "pwm clock has no frequency\n");
- goto disable_pwmclk;
- }
+ if (!lpc18xx_pwm->clk_rate)
+ return dev_err_probe(&pdev->dev,
+ -EINVAL, "pwm clock has no frequency\n");
/*
* If clkrate is too fast, the calculations in .apply() might overflow.
*/
- if (lpc18xx_pwm->clk_rate > NSEC_PER_SEC) {
- ret = dev_err_probe(&pdev->dev, -EINVAL, "pwm clock to fast\n");
- goto disable_pwmclk;
- }
+ if (lpc18xx_pwm->clk_rate > NSEC_PER_SEC)
+ return dev_err_probe(&pdev->dev, -EINVAL, "pwm clock to fast\n");
mutex_init(&lpc18xx_pwm->res_lock);
mutex_init(&lpc18xx_pwm->period_lock);
@@ -435,18 +427,12 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
ret = pwmchip_add(&lpc18xx_pwm->chip);
- if (ret < 0) {
- dev_err_probe(&pdev->dev, ret, "pwmchip_add failed\n");
- goto disable_pwmclk;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "pwmchip_add failed\n");
platform_set_drvdata(pdev, lpc18xx_pwm);
return 0;
-
-disable_pwmclk:
- clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
- return ret;
}
static void lpc18xx_pwm_remove(struct platform_device *pdev)
@@ -459,8 +445,6 @@ static void lpc18xx_pwm_remove(struct platform_device *pdev)
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
val | LPC18XX_PWM_CTRL_HALT);
-
- clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
}
static struct platform_driver lpc18xx_pwm_driver = {
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 86a0ea0f6955..806f0bb3ad6d 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -51,10 +51,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (duty_cycles > 255)
duty_cycles = 255;
- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~0xFFFF;
val |= (period_cycles << 8) | duty_cycles;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);
return 0;
}
@@ -69,9 +69,9 @@ static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
if (ret)
return ret;
- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val |= PWM_ENABLE;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);
return 0;
}
@@ -81,9 +81,9 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
u32 val;
- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~PWM_ENABLE;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);
clk_disable_unprepare(lpc32xx->clk);
}
@@ -141,9 +141,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.npwm = 1;
/* If PWM is disabled, configure the output to the default value */
- val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~PWM_PIN_LEVEL;
- writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ writel(val, lpc32xx->base);
ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 7a51d210a877..6adb0ed01906 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 22f54db3ae8e..25519cddc2a9 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -37,7 +37,6 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
index 8750b57684a9..e7525c98105e 100644
--- a/drivers/pwm/pwm-microchip-core.c
+++ b/drivers/pwm/pwm-microchip-core.c
@@ -37,7 +37,7 @@
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 2401b6733241..a83bd6e18b07 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-ntxec.c b/drivers/pwm/pwm-ntxec.c
index ab63b081df53..7514ea384ec5 100644
--- a/drivers/pwm/pwm-ntxec.c
+++ b/drivers/pwm/pwm-ntxec.c
@@ -24,7 +24,6 @@
#include <linux/types.h>
struct ntxec_pwm {
- struct device *dev;
struct ntxec *ec;
struct pwm_chip chip;
};
@@ -141,14 +140,13 @@ static int ntxec_pwm_probe(struct platform_device *pdev)
struct ntxec_pwm *priv;
struct pwm_chip *chip;
- pdev->dev.of_node = pdev->dev.parent->of_node;
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->ec = ec;
- priv->dev = &pdev->dev;
chip = &priv->chip;
chip->dev = &pdev->dev;
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 762429d5647f..1e475ed10180 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -15,6 +15,7 @@
* input clock (PWMCR_SD is set) and the output is driven to inactive.
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -156,13 +157,6 @@ MODULE_DEVICE_TABLE(of, pwm_of_match);
#define pwm_of_match NULL
#endif
-static const struct platform_device_id *pxa_pwm_get_id_dt(struct device *dev)
-{
- const struct of_device_id *id = of_match_device(pwm_of_match, dev);
-
- return id ? id->data : NULL;
-}
-
static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
@@ -170,7 +164,7 @@ static int pwm_probe(struct platform_device *pdev)
int ret = 0;
if (IS_ENABLED(CONFIG_OF) && id == NULL)
- id = pxa_pwm_get_id_dt(&pdev->dev);
+ id = of_device_get_match_data(&pdev->dev);
if (id == NULL)
return -EINVAL;
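The pwm-pxa hunk drops an open-coded of_match_device() wrapper in favour of of_device_get_match_data(), which looks up the OF match for the device and returns the .data pointer of the matching of_device_id entry (or NULL when nothing matched). A hedged sketch of that lookup; the variant struct and compatible string are made up for illustration.

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-variant data selected via the OF match table. */
struct foo_variant {
	unsigned int nr_channels;
};

static const struct foo_variant foo_v1 = { .nr_channels = 2 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1 },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_variant *variant;

	/* Returns the .data of the matching entry, NULL if there is no match. */
	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	dev_info(&pdev->dev, "%u channels\n", variant->nr_channels);
	return 0;
}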
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index c1a1f2d864b5..03ee18fb82d5 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -52,9 +52,9 @@ struct rockchip_pwm_data {
u32 enable_conf;
};
-static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
+static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *chip)
{
- return container_of(c, struct rockchip_pwm_chip, chip);
+ return container_of(chip, struct rockchip_pwm_chip, chip);
}
static int rockchip_pwm_get_state(struct pwm_chip *chip,
diff --git a/drivers/pwm/pwm-rz-mtu3.c b/drivers/pwm/pwm-rz-mtu3.c
index bed8bd671e37..a56cecb0e46e 100644
--- a/drivers/pwm/pwm-rz-mtu3.c
+++ b/drivers/pwm/pwm-rz-mtu3.c
@@ -40,7 +40,7 @@
* struct rz_mtu3_channel_io_map - MTU3 pwm channel map
*
* @base_pwm_number: First PWM of a channel
- * @num: number of IOs on the HW channel.
+ * @num_channel_ios: number of IOs on the HW channel.
*/
struct rz_mtu3_channel_io_map {
u8 base_pwm_number;
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index ae49d67ab2b1..eabddb7c7820 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -13,6 +13,7 @@
*/
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -51,9 +52,9 @@ struct pwm_sifive_ddata {
};
static inline
-struct pwm_sifive_ddata *pwm_sifive_chip_to_ddata(struct pwm_chip *c)
+struct pwm_sifive_ddata *pwm_sifive_chip_to_ddata(struct pwm_chip *chip)
{
- return container_of(c, struct pwm_sifive_ddata, chip);
+ return container_of(chip, struct pwm_sifive_ddata, chip);
}
static int pwm_sifive_request(struct pwm_chip *chip, struct pwm_device *pwm)
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index e64900ad4ba1..9e42e3a74ad6 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -38,6 +38,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -80,12 +81,15 @@
regmap_write((priv)->regmap, (priv)->offset + (reg), (val))
struct sl28cpld_pwm {
- struct pwm_chip pwm_chip;
+ struct pwm_chip chip;
struct regmap *regmap;
u32 offset;
};
-#define sl28cpld_pwm_from_chip(_chip) \
- container_of(_chip, struct sl28cpld_pwm, pwm_chip)
+
+static inline struct sl28cpld_pwm *sl28cpld_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct sl28cpld_pwm, chip);
+}
static int sl28cpld_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
@@ -228,12 +232,12 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
}
/* Initialize the pwm_chip structure */
- chip = &priv->pwm_chip;
+ chip = &priv->chip;
chip->dev = &pdev->dev;
chip->ops = &sl28cpld_pwm_ops;
chip->npwm = 1;
- ret = devm_pwmchip_add(&pdev->dev, &priv->pwm_chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret) {
dev_err(&pdev->dev, "failed to add PWM chip (%pe)",
ERR_PTR(ret));
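Several PWM hunks in this series (lp3943, rockchip, sifive, sl28cpld) replace a macro or a terse parameter name with a typed static inline container_of() helper: the PWM core only passes the embedded struct pwm_chip back into callbacks, and the helper walks back to the driver's private structure. A generic sketch with a hypothetical foo driver:

#include <linux/container_of.h>
#include <linux/io.h>
#include <linux/pwm.h>

/* Hypothetical driver data embedding the generic pwm_chip. */
struct foo_pwm {
	struct pwm_chip chip;
	void __iomem *base;
};

static inline struct foo_pwm *to_foo_pwm(struct pwm_chip *chip)
{
	/* Walk back from the embedded member to the enclosing structure. */
	return container_of(chip, struct foo_pwm, chip);
}

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	struct foo_pwm *foo = to_foo_pwm(chip);

	writel(state->enabled, foo->base);	/* illustrative register write */
	return 0;
}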
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index d43a6fa3f4e0..1499c8c1fe37 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 62e397aeb9aa..3d6be7749e23 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -637,7 +637,7 @@ static int stm32_pwm_probe(struct platform_device *pdev)
priv->chip.ops = &stm32pwm_ops;
priv->chip.npwm = stm32_pwm_detect_channels(priv);
- ret = pwmchip_add(&priv->chip);
+ ret = devm_pwmchip_add(dev, &priv->chip);
if (ret < 0)
return ret;
@@ -646,17 +646,6 @@ static int stm32_pwm_probe(struct platform_device *pdev)
return 0;
}
-static void stm32_pwm_remove(struct platform_device *pdev)
-{
- struct stm32_pwm *priv = platform_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < priv->chip.npwm; i++)
- pwm_disable(&priv->chip.pwms[i]);
-
- pwmchip_remove(&priv->chip);
-}
-
static int __maybe_unused stm32_pwm_suspend(struct device *dev)
{
struct stm32_pwm *priv = dev_get_drvdata(dev);
@@ -701,7 +690,6 @@ MODULE_DEVICE_TABLE(of, stm32_pwm_of_match);
static struct platform_driver stm32_pwm_driver = {
.probe = stm32_pwm_probe,
- .remove_new = stm32_pwm_remove,
.driver = {
.name = "stm32-pwm",
.of_match_table = stm32_pwm_of_match,
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index 5d4a4762ce0c..e205405c4828 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -61,8 +61,8 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return 0;
}
-static void stmpe_24xx_pwm_disable(struct pwm_chip *chip,
- struct pwm_device *pwm)
+static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
{
struct stmpe_pwm *stmpe_pwm = to_stmpe_pwm(chip);
u8 value;
@@ -72,17 +72,16 @@ static void stmpe_24xx_pwm_disable(struct pwm_chip *chip,
if (ret < 0) {
dev_err(chip->dev, "error reading PWM#%u control\n",
pwm->hwpwm);
- return;
+ return ret;
}
value = ret & ~BIT(pwm->hwpwm);
ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
- if (ret) {
+ if (ret)
dev_err(chip->dev, "error writing PWM#%u control\n",
pwm->hwpwm);
- return;
- }
+ return ret;
}
/* STMPE 24xx PWM instructions */
@@ -111,7 +110,9 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* Make sure we are disabled */
if (pwm_is_enabled(pwm)) {
- stmpe_24xx_pwm_disable(chip, pwm);
+ ret = stmpe_24xx_pwm_disable(chip, pwm);
+ if (ret)
+ return ret;
} else {
/* Connect the PWM to the pin */
pin = pwm->hwpwm;
@@ -269,7 +270,7 @@ static int stmpe_24xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
if (pwm->state.enabled)
- stmpe_24xx_pwm_disable(chip, pwm);
+ return stmpe_24xx_pwm_disable(chip, pwm);
return 0;
}
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index a8790a8fc53e..c84fcf1a13dc 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -17,7 +17,6 @@
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/reset.h>
diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c
index d6ebe9f03b35..7705c7b86c3a 100644
--- a/drivers/pwm/pwm-sunplus.c
+++ b/drivers/pwm/pwm-sunplus.c
@@ -23,6 +23,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 5810abf66e2a..a169a34e0778 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -41,7 +41,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm_opp.h>
#include <linux/pwm.h>
#include <linux/platform_device.h>
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 109449956307..8c94b266c1b2 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -12,7 +12,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
/* ECAP registers and bits definitions */
#define CAP1 0x08
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index bb3959ace6b4..ecbfd7e954ec 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -12,7 +12,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
/* EHRPWM registers and bits definitions */
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
index e3fb79b3e2a7..7f7591a2384c 100644
--- a/drivers/pwm/pwm-visconti.c
+++ b/drivers/pwm/pwm-visconti.c
@@ -21,7 +21,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index d2c48fd98706..6d46db51daac 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -6,6 +6,7 @@
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -18,10 +19,6 @@
#include <asm/div64.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
-
/*
* SoC architecture allocates register space for 4 PWMs but only
* 2 are currently implemented.
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d8e1caaf207e..3137e40fcd3e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5542,6 +5542,8 @@ regulator_register(struct device *dev,
goto rinse;
}
device_initialize(&rdev->dev);
+ dev_set_drvdata(&rdev->dev, rdev);
+ rdev->dev.class = &regulator_class;
spin_lock_init(&rdev->err_lock);
/*
@@ -5603,11 +5605,9 @@ regulator_register(struct device *dev,
rdev->supply_name = regulator_desc->supply_name;
/* register with sysfs */
- rdev->dev.class = &regulator_class;
rdev->dev.parent = config->dev;
dev_set_name(&rdev->dev, "regulator.%lu",
(unsigned long) atomic_inc_return(&regulator_no));
- dev_set_drvdata(&rdev->dev, rdev);
/* set regulator constraints */
if (init_data)
@@ -5724,15 +5724,11 @@ wash:
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
mutex_unlock(&regulator_list_mutex);
- put_device(&rdev->dev);
- rdev = NULL;
clean:
if (dangling_of_gpiod)
gpiod_put(config->ena_gpiod);
- if (rdev && rdev->dev.of_node)
- of_node_put(rdev->dev.of_node);
- kfree(rdev);
kfree(config);
+ put_device(&rdev->dev);
rinse:
if (dangling_cfg_gpiod)
gpiod_put(cfg->ena_gpiod);
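The regulator_register() rework follows the rule that once device_initialize() has been called, the struct device owns the object: error paths must drop it with put_device(), which ends up in the class release callback (freeing rdev and its of_node reference), rather than kfree()ing the structure directly. A stripped-down sketch of that ownership pattern with a hypothetical object:

#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/slab.h>

struct foo {
	struct device dev;
};

static void foo_release(struct device *dev)
{
	/* Called when the last reference goes away via put_device(). */
	kfree(container_of(dev, struct foo, dev));
}

static struct foo *foo_create(void)
{
	struct foo *foo;
	int ret;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;

	device_initialize(&foo->dev);
	foo->dev.release = foo_release;

	ret = dev_set_name(&foo->dev, "foo0");
	if (!ret)
		ret = device_add(&foo->dev);
	if (ret) {
		/* Never kfree() after device_initialize(); the release does it. */
		put_device(&foo->dev);
		return NULL;
	}

	return foo;
}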
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 5ad5f3b3a6b5..d49268336553 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -197,7 +197,7 @@ int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
sel += rdev->desc->linear_ranges[i].min_sel;
range = rdev->desc->linear_range_selectors_bitfield[i];
- range <<= ffs(rdev->desc->vsel_mask) - 1;
+ range <<= ffs(rdev->desc->vsel_range_mask) - 1;
if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg) {
ret = regmap_update_bits(rdev->regmap,
diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
index b9cda2210c33..65fbd95f1dbb 100644
--- a/drivers/regulator/mt6358-regulator.c
+++ b/drivers/regulator/mt6358-regulator.c
@@ -43,7 +43,7 @@ struct mt6358_regulator_info {
.desc = { \
.name = #vreg, \
.of_match = of_match_ptr(match), \
- .ops = &mt6358_volt_range_ops, \
+ .ops = &mt6358_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MT6358_ID_##vreg, \
.owner = THIS_MODULE, \
@@ -139,7 +139,7 @@ struct mt6358_regulator_info {
.desc = { \
.name = #vreg, \
.of_match = of_match_ptr(match), \
- .ops = &mt6358_volt_range_ops, \
+ .ops = &mt6358_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MT6366_ID_##vreg, \
.owner = THIS_MODULE, \
@@ -450,7 +450,7 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
}
}
-static const struct regulator_ops mt6358_volt_range_ops = {
+static const struct regulator_ops mt6358_buck_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -464,6 +464,18 @@ static const struct regulator_ops mt6358_volt_range_ops = {
.get_mode = mt6358_regulator_get_mode,
};
+static const struct regulator_ops mt6358_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = mt6358_get_buck_voltage_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6358_get_status,
+};
+
static const struct regulator_ops mt6358_volt_table_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_iterate,
diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
index d022184a8e7d..9b7c3d77789e 100644
--- a/drivers/regulator/tps6287x-regulator.c
+++ b/drivers/regulator/tps6287x-regulator.c
@@ -119,7 +119,7 @@ static struct regulator_desc tps6287x_reg = {
.ramp_mask = TPS6287X_CTRL1_VRAMP,
.ramp_delay_table = tps6287x_ramp_table,
.n_ramp_values = ARRAY_SIZE(tps6287x_ramp_table),
- .n_voltages = 256,
+ .n_voltages = 256 * ARRAY_SIZE(tps6287x_voltage_ranges),
.linear_ranges = tps6287x_voltage_ranges,
.n_linear_ranges = ARRAY_SIZE(tps6287x_voltage_ranges),
.linear_range_selectors_bitfield = tps6287x_voltage_range_sel,
diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
index 25ef102c8270..b7f0c8779757 100644
--- a/drivers/regulator/tps6594-regulator.c
+++ b/drivers/regulator/tps6594-regulator.c
@@ -384,21 +384,19 @@ static int tps6594_request_reg_irqs(struct platform_device *pdev,
if (irq < 0)
return -EINVAL;
- irq_data[*irq_idx + j].dev = tps->dev;
- irq_data[*irq_idx + j].type = irq_type;
- irq_data[*irq_idx + j].rdev = rdev;
+ irq_data[*irq_idx].dev = tps->dev;
+ irq_data[*irq_idx].type = irq_type;
+ irq_data[*irq_idx].rdev = rdev;
error = devm_request_threaded_irq(tps->dev, irq, NULL,
- tps6594_regulator_irq_handler,
- IRQF_ONESHOT,
- irq_type->irq_name,
- &irq_data[*irq_idx]);
- (*irq_idx)++;
+ tps6594_regulator_irq_handler, IRQF_ONESHOT,
+ irq_type->irq_name, &irq_data[*irq_idx]);
if (error) {
dev_err(tps->dev, "tps6594 failed to request %s IRQ %d: %d\n",
irq_type->irq_name, irq, error);
return error;
}
+ (*irq_idx)++;
}
return 0;
}
@@ -420,8 +418,8 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
int error, i, irq, multi, delta;
int irq_idx = 0;
int buck_idx = 0;
- int ext_reg_irq_nb = 2;
-
+ size_t ext_reg_irq_nb = 2;
+ size_t reg_irq_nb;
enum {
MULTI_BUCK12,
MULTI_BUCK123,
@@ -484,15 +482,16 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
}
}
- if (tps->chip_id == LP8764)
+ if (tps->chip_id == LP8764) {
/* There are only 4 bucks on LP8764 */
buck_configured[4] = 1;
+ reg_irq_nb = size_mul(REGS_INT_NB, (BUCK_NB - 1));
+ } else {
+ reg_irq_nb = size_mul(REGS_INT_NB, (size_add(BUCK_NB, LDO_NB)));
+ }
- irq_data = devm_kmalloc_array(tps->dev,
- REGS_INT_NB * sizeof(struct tps6594_regulator_irq_data),
- ARRAY_SIZE(tps6594_bucks_irq_types) +
- ARRAY_SIZE(tps6594_ldos_irq_types),
- GFP_KERNEL);
+ irq_data = devm_kmalloc_array(tps->dev, reg_irq_nb,
+ sizeof(struct tps6594_regulator_irq_data), GFP_KERNEL);
if (!irq_data)
return -ENOMEM;
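The tps6594 allocation above sizes the IRQ data array with size_mul()/size_add() from <linux/overflow.h>; those helpers saturate at SIZE_MAX instead of silently wrapping, so devm_kmalloc_array() fails cleanly rather than handing back an undersized buffer. A minimal sketch of the idea (the element type and counts are illustrative):

#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct foo_irq_data {
	int type;
};

static struct foo_irq_data *foo_alloc_irq_data(struct device *dev,
					       size_t nr_regs, size_t nr_banks)
{
	/* Saturates on overflow, so the allocator rejects bogus sizes. */
	size_t n = size_mul(nr_regs, nr_banks);

	return devm_kmalloc_array(dev, n, sizeof(struct foo_irq_data),
				  GFP_KERNEL);
}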
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index d95fa5586189..8fcda9b74545 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -12,8 +12,7 @@
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index f9874fc5a80f..8bb293b9f327 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -13,9 +13,9 @@
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
@@ -40,6 +40,12 @@
#define IMX7D_M4_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST | \
IMX7D_SW_M4C_NON_SCLR_RST)
+#define IMX8M_M7_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST)
+#define IMX8M_M7_POLL IMX7D_ENABLE_M4
+
+#define IMX8M_GPR22 0x58
+#define IMX8M_GPR22_CM7_CPUWAIT BIT(0)
+
/* Address: 0x020D8000 */
#define IMX6SX_SRC_SCR 0x00
#define IMX6SX_ENABLE_M4 BIT(22)
@@ -91,6 +97,7 @@ static int imx_rproc_detach_pd(struct rproc *rproc);
struct imx_rproc {
struct device *dev;
struct regmap *regmap;
+ struct regmap *gpr;
struct rproc *rproc;
const struct imx_rproc_dcfg *dcfg;
struct imx_rproc_mem mem[IMX_RPROC_MEM_MAX];
@@ -285,6 +292,18 @@ static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn_mmio = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX8M_M7_STOP,
+ .gpr_reg = IMX8M_GPR22,
+ .gpr_wait = IMX8M_GPR22_CM7_CPUWAIT,
+ .att = imx_rproc_att_imx8mn,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
+ .method = IMX_RPROC_MMIO,
+};
+
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
.att = imx_rproc_att_imx8mn,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
@@ -365,8 +384,14 @@ static int imx_rproc_start(struct rproc *rproc)
switch (dcfg->method) {
case IMX_RPROC_MMIO:
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
- dcfg->src_start);
+ if (priv->gpr) {
+ ret = regmap_clear_bits(priv->gpr, dcfg->gpr_reg,
+ dcfg->gpr_wait);
+ } else {
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg,
+ dcfg->src_mask,
+ dcfg->src_start);
+ }
break;
case IMX_RPROC_SMC:
arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
@@ -395,6 +420,16 @@ static int imx_rproc_stop(struct rproc *rproc)
switch (dcfg->method) {
case IMX_RPROC_MMIO:
+ if (priv->gpr) {
+ ret = regmap_set_bits(priv->gpr, dcfg->gpr_reg,
+ dcfg->gpr_wait);
+ if (ret) {
+ dev_err(priv->dev,
+ "Failed to quiesce M4 platform!\n");
+ return ret;
+ }
+ }
+
ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
dcfg->src_stop);
break;
@@ -725,13 +760,22 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
return 0;
}
+static int imx_rproc_notified_idr_cb(int id, void *ptr, void *data)
+{
+ struct rproc *rproc = data;
+
+ rproc_vq_interrupt(rproc, id);
+
+ return 0;
+}
+
static void imx_rproc_vq_work(struct work_struct *work)
{
struct imx_rproc *priv = container_of(work, struct imx_rproc,
rproc_work);
+ struct rproc *rproc = priv->rproc;
- rproc_vq_interrupt(priv->rproc, 0);
- rproc_vq_interrupt(priv->rproc, 1);
+ idr_for_each(&rproc->notifyids, imx_rproc_notified_idr_cb, rproc);
}
static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
@@ -983,6 +1027,10 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
break;
}
+ priv->gpr = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,iomuxc-gpr");
+ if (IS_ERR(priv->gpr))
+ priv->gpr = NULL;
+
regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
if (IS_ERR(regmap)) {
dev_err(dev, "failed to find syscon\n");
@@ -992,6 +1040,19 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
priv->regmap = regmap;
regmap_attach_dev(dev, regmap, &config);
+ if (priv->gpr) {
+ ret = regmap_read(priv->gpr, dcfg->gpr_reg, &val);
+ if (val & dcfg->gpr_wait) {
+ /*
+ * After cold boot, the CM indicates it's in wait
+ * state, but not fully powered off. Power it off
+ * fully so firmware can be loaded into it.
+ */
+ imx_rproc_stop(priv->rproc);
+ return 0;
+ }
+ }
+
ret = regmap_read(regmap, dcfg->src_reg, &val);
if (ret) {
dev_err(dev, "Failed to read src\n");
@@ -1133,6 +1194,8 @@ static const struct of_device_id imx_rproc_of_match[] = {
{ .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq },
{ .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn },
{ .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn },
+ { .compatible = "fsl,imx8mn-cm7-mmio", .data = &imx_rproc_cfg_imx8mn_mmio },
+ { .compatible = "fsl,imx8mp-cm7-mmio", .data = &imx_rproc_cfg_imx8mn_mmio },
{ .compatible = "fsl,imx8qxp-cm4", .data = &imx_rproc_cfg_imx8qxp },
{ .compatible = "fsl,imx8qm-cm4", .data = &imx_rproc_cfg_imx8qm },
{ .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp },
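On the new *-cm7-mmio variants added above, the Cortex-M core is released by clearing the CPUWAIT bit in GPR22 and parked by setting it again, using the regmap bit helpers instead of an open-coded read-modify-write. A reduced sketch of that start/stop step; the register and bit defines mirror the ones added in this hunk, and the regmap handle is assumed to come from the fsl,iomuxc-gpr syscon lookup:

#include <linux/bits.h>
#include <linux/regmap.h>

#define IMX8M_GPR22		0x58
#define IMX8M_GPR22_CM7_CPUWAIT	BIT(0)

/* Let the core run: clear CPUWAIT. */
static int foo_m7_start(struct regmap *gpr)
{
	return regmap_clear_bits(gpr, IMX8M_GPR22, IMX8M_GPR22_CM7_CPUWAIT);
}

/* Park the core again: set CPUWAIT before asserting reset. */
static int foo_m7_stop(struct regmap *gpr)
{
	return regmap_set_bits(gpr, IMX8M_GPR22, IMX8M_GPR22_CM7_CPUWAIT);
}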
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
index 1c7e2127c758..79a1b8956d14 100644
--- a/drivers/remoteproc/imx_rproc.h
+++ b/drivers/remoteproc/imx_rproc.h
@@ -31,6 +31,8 @@ struct imx_rproc_dcfg {
u32 src_mask;
u32 src_start;
u32 src_stop;
+ u32 gpr_reg;
+ u32 gpr_wait;
const struct imx_rproc_att *att;
size_t att_size;
enum imx_rproc_method method;
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 82ed90f03d91..8f50ab80e56f 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -19,7 +19,8 @@
#include <linux/clk/ti.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 2874c8d324f7..327f0c7ee3d6 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -16,8 +16,9 @@
#include <linux/debugfs.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/remoteproc/pruss.h>
#include <linux/pruss_driver.h>
#include <linux/remoteproc.h>
@@ -109,6 +110,7 @@ struct pru_private_data {
* @dbg_single_step: debug state variable to set PRU into single step mode
* @dbg_continuous: debug state variable to restore PRU execution mode
* @evt_count: number of mapped events
+ * @gpmux_save: saved value for gpmux config
*/
struct pru_rproc {
int id;
@@ -127,6 +129,7 @@ struct pru_rproc {
u32 dbg_single_step;
u32 dbg_continuous;
u8 evt_count;
+ u8 gpmux_save;
};
static inline u32 pru_control_read_reg(struct pru_rproc *pru, unsigned int reg)
@@ -228,6 +231,7 @@ struct rproc *pru_rproc_get(struct device_node *np, int index,
struct device *dev;
const char *fw_name;
int ret;
+ u32 mux;
rproc = __pru_rproc_get(np, index);
if (IS_ERR(rproc))
@@ -252,6 +256,23 @@ struct rproc *pru_rproc_get(struct device_node *np, int index,
if (pru_id)
*pru_id = pru->id;
+ ret = pruss_cfg_get_gpmux(pru->pruss, pru->id, &pru->gpmux_save);
+ if (ret) {
+ dev_err(dev, "failed to get cfg gpmux: %d\n", ret);
+ goto err;
+ }
+
+ /* An error here is acceptable for backward compatibility */
+ ret = of_property_read_u32_index(np, "ti,pruss-gp-mux-sel", index,
+ &mux);
+ if (!ret) {
+ ret = pruss_cfg_set_gpmux(pru->pruss, pru->id, mux);
+ if (ret) {
+ dev_err(dev, "failed to set cfg gpmux: %d\n", ret);
+ goto err;
+ }
+ }
+
ret = of_property_read_string_index(np, "firmware-name", index,
&fw_name);
if (!ret) {
@@ -290,6 +311,8 @@ void pru_rproc_put(struct rproc *rproc)
pru = rproc->priv;
+ pruss_cfg_set_gpmux(pru->pruss, pru->id, pru->gpmux_save);
+
pru_rproc_set_firmware(rproc, NULL);
mutex_lock(&pru->lock);
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index a0d4238492e9..03e5f5d533eb 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -29,9 +29,9 @@
#define MAX_NUM_OF_SS 10
#define MAX_REGION_NAME_LENGTH 16
#define SBL_MINIDUMP_SMEM_ID 602
-#define MD_REGION_VALID ('V' << 24 | 'A' << 16 | 'L' << 8 | 'I' << 0)
-#define MD_SS_ENCR_DONE ('D' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
-#define MD_SS_ENABLED ('E' << 24 | 'N' << 16 | 'B' << 8 | 'L' << 0)
+#define MINIDUMP_REGION_VALID ('V' << 24 | 'A' << 16 | 'L' << 8 | 'I' << 0)
+#define MINIDUMP_SS_ENCR_DONE ('D' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
+#define MINIDUMP_SS_ENABLED ('E' << 24 | 'N' << 16 | 'B' << 8 | 'L' << 0)
/**
* struct minidump_region - Minidump region
@@ -125,7 +125,7 @@ static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsy
for (i = 0; i < seg_cnt; i++) {
memcpy_fromio(&region, ptr + i, sizeof(region));
- if (le32_to_cpu(region.valid) == MD_REGION_VALID) {
+ if (le32_to_cpu(region.valid) == MINIDUMP_REGION_VALID) {
name = kstrndup(region.name, MAX_REGION_NAME_LENGTH - 1, GFP_KERNEL);
if (!name) {
iounmap(ptr);
@@ -168,12 +168,21 @@ void qcom_minidump(struct rproc *rproc, unsigned int minidump_id,
*/
if (subsystem->regions_baseptr == 0 ||
le32_to_cpu(subsystem->status) != 1 ||
- le32_to_cpu(subsystem->enabled) != MD_SS_ENABLED ||
- le32_to_cpu(subsystem->encryption_status) != MD_SS_ENCR_DONE) {
+ le32_to_cpu(subsystem->enabled) != MINIDUMP_SS_ENABLED) {
+ return rproc_coredump(rproc);
+ }
+
+ if (le32_to_cpu(subsystem->encryption_status) != MINIDUMP_SS_ENCR_DONE) {
dev_err(&rproc->dev, "Minidump not ready, skipping\n");
return;
}
+ /*
+ * Clear out the dump segments populated by parse_fw before
+ * re-populating them with minidump segments.
+ */
+ rproc_coredump_cleanup(rproc);
+
ret = qcom_add_minidump_segments(rproc, subsystem, rproc_dumpfn_t);
if (ret) {
dev_err(&rproc->dev, "Failed with error: %d while adding minidump entries\n", ret);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 6777a3bd6226..6c67514cc493 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -14,8 +14,8 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -637,28 +637,26 @@ static int adsp_init_mmio(struct qcom_adsp *adsp,
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
+ struct reserved_mem *rmem = NULL;
struct device_node *node;
- struct resource r;
- int ret;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
- if (!node) {
- dev_err(adsp->dev, "no memory-region specified\n");
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve memory-region\n");
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret)
- return ret;
-
- adsp->mem_phys = adsp->mem_reloc = r.start;
- adsp->mem_size = resource_size(&r);
+ adsp->mem_phys = adsp->mem_reloc = rmem->base;
+ adsp->mem_size = rmem->size;
adsp->mem_region = devm_ioremap_wc(adsp->dev,
adsp->mem_phys, adsp->mem_size);
if (!adsp->mem_region) {
dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, adsp->mem_size);
+ &rmem->base, adsp->mem_size);
return -EBUSY;
}
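This adsp hunk, and the mss/pas/wcnss hunks below, all switch from of_address_to_resource() to of_reserved_mem_lookup(), which resolves the memory-region phandle against the reserved-memory table and therefore also works for dynamically placed regions without a fixed reg property. A generic sketch of the lookup-and-map sequence; the output parameters are illustrative, not taken from any one driver:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>

static int foo_map_memory_region(struct device *dev, void __iomem **va,
				 phys_addr_t *pa, size_t *size)
{
	struct reserved_mem *rmem = NULL;
	struct device_node *node;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (node)
		rmem = of_reserved_mem_lookup(node);
	of_node_put(node);

	if (!rmem)
		return -EINVAL;

	*pa = rmem->base;
	*size = rmem->size;
	/* Write-combined mapping of the carveout, released on unbind. */
	*va = devm_ioremap_wc(dev, rmem->base, rmem->size);

	return *va ? 0 : -EBUSY;
}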
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 70bffc9f33f6..22fe7b5f5236 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -15,9 +15,9 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -71,6 +71,7 @@
#define QDSP6SS_MEM_PWR_CTL 0x0B0
#define QDSP6V6SS_MEM_PWR_CTL 0x034
#define QDSP6SS_STRAP_ACC 0x110
+#define QDSP6V62SS_BHS_STATUS 0x0C4
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
@@ -123,6 +124,7 @@
#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
#define QDSP6SS_XO_CBCR 0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
+#define QDSP6v55_BHS_EN_REST_ACK BIT(0)
/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR 0x20
@@ -130,6 +132,7 @@
#define QDSP6SS_BOOT_CORE_START 0x400
#define QDSP6SS_BOOT_CMD 0x404
#define BOOT_FSM_TIMEOUT 10000
+#define BHS_CHECK_MAX_LOOPS 200
struct reg_info {
struct regulator *reg;
@@ -250,6 +253,7 @@ enum {
MSS_MSM8998,
MSS_SC7180,
MSS_SC7280,
+ MSS_SDM660,
MSS_SDM845,
};
@@ -700,7 +704,8 @@ static int q6v5proc_reset(struct q6v5 *qproc)
} else if (qproc->version == MSS_MSM8909 ||
qproc->version == MSS_MSM8953 ||
qproc->version == MSS_MSM8996 ||
- qproc->version == MSS_MSM8998) {
+ qproc->version == MSS_MSM8998 ||
+ qproc->version == MSS_SDM660) {
if (qproc->version != MSS_MSM8909 &&
qproc->version != MSS_MSM8953)
@@ -734,6 +739,16 @@ static int q6v5proc_reset(struct q6v5 *qproc)
val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
udelay(1);
+ if (qproc->version == MSS_SDM660) {
+ ret = readl_relaxed_poll_timeout(qproc->reg_base + QDSP6V62SS_BHS_STATUS,
+ i, (i & QDSP6v55_BHS_EN_REST_ACK),
+ 1, BHS_CHECK_MAX_LOOPS);
+ if (ret == -ETIMEDOUT) {
+ dev_err(qproc->dev, "BHS_EN_REST_ACK not set!\n");
+ return -ETIMEDOUT;
+ }
+ }
+
/* Put LDO in bypass mode */
val |= QDSP6v56_LDO_BYP;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
@@ -756,7 +771,7 @@ static int q6v5proc_reset(struct q6v5 *qproc)
mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
i = 19;
} else {
- /* MSS_MSM8998 */
+ /* MSS_MSM8998, MSS_SDM660 */
mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
i = 28;
}
@@ -1875,8 +1890,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
struct device_node *child;
struct reserved_mem *rmem;
struct device_node *node;
- struct resource r;
- int ret;
/*
* In the absence of mba/mpss sub-child, extract the mba and mpss
@@ -1891,15 +1904,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
of_node_put(child);
}
- ret = of_address_to_resource(node, 0, &r);
+ if (!node) {
+ dev_err(qproc->dev, "no mba memory-region specified\n");
+ return -EINVAL;
+ }
+
+ rmem = of_reserved_mem_lookup(node);
of_node_put(node);
- if (ret) {
+ if (!rmem) {
dev_err(qproc->dev, "unable to resolve mba region\n");
- return ret;
+ return -EINVAL;
}
- qproc->mba_phys = r.start;
- qproc->mba_size = resource_size(&r);
+ qproc->mba_phys = rmem->base;
+ qproc->mba_size = rmem->size;
if (!child) {
node = of_parse_phandle(qproc->dev->of_node,
@@ -1910,15 +1928,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
of_node_put(child);
}
- ret = of_address_to_resource(node, 0, &r);
+ if (!node) {
+ dev_err(qproc->dev, "no mpss memory-region specified\n");
+ return -EINVAL;
+ }
+
+ rmem = of_reserved_mem_lookup(node);
of_node_put(node);
- if (ret) {
+ if (!rmem) {
dev_err(qproc->dev, "unable to resolve mpss region\n");
- return ret;
+ return -EINVAL;
}
- qproc->mpss_phys = qproc->mpss_reloc = r.start;
- qproc->mpss_size = resource_size(&r);
+ qproc->mpss_phys = qproc->mpss_reloc = rmem->base;
+ qproc->mpss_size = rmem->size;
if (!child) {
node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
@@ -2191,6 +2214,37 @@ static const struct rproc_hexagon_res sc7280_mss = {
.version = MSS_SC7280,
};
+static const struct rproc_hexagon_res sdm660_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "qdss",
+ "mem",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "gpll0_mss",
+ "mnoc_axi",
+ "snoc_axi",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_SDM660,
+};
+
static const struct rproc_hexagon_res sdm845_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
@@ -2473,6 +2527,7 @@ static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
{ .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
+ { .compatible = "qcom,sdm660-mss-pil", .data = &sdm660_mss},
{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
{ },
};
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 3153d82037e7..b5447dd2dd35 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -13,8 +13,9 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -425,6 +426,7 @@ static const struct rproc_ops adsp_minidump_ops = {
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
+ .parse_fw = qcom_register_dump_segments,
.load = adsp_load,
.panic = adsp_panic,
.coredump = adsp_minidump,
@@ -533,9 +535,8 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
+ struct reserved_mem *rmem;
struct device_node *node;
- struct resource r;
- int ret;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
if (!node) {
@@ -543,17 +544,19 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
+ rmem = of_reserved_mem_lookup(node);
of_node_put(node);
- if (ret)
- return ret;
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve memory-region\n");
+ return -EINVAL;
+ }
- adsp->mem_phys = adsp->mem_reloc = r.start;
- adsp->mem_size = resource_size(&r);
+ adsp->mem_phys = adsp->mem_reloc = rmem->base;
+ adsp->mem_size = rmem->size;
adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size);
if (!adsp->mem_region) {
dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, adsp->mem_size);
+ &rmem->base, adsp->mem_size);
return -EBUSY;
}
@@ -566,16 +569,19 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
- if (ret)
- return ret;
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve dtb memory-region\n");
+ return -EINVAL;
+ }
- adsp->dtb_mem_phys = adsp->dtb_mem_reloc = r.start;
- adsp->dtb_mem_size = resource_size(&r);
+ adsp->dtb_mem_phys = adsp->dtb_mem_reloc = rmem->base;
+ adsp->dtb_mem_size = rmem->size;
adsp->dtb_mem_region = devm_ioremap_wc(adsp->dev, adsp->dtb_mem_phys, adsp->dtb_mem_size);
if (!adsp->dtb_mem_region) {
dev_err(adsp->dev, "unable to map dtb memory region: %pa+%zx\n",
- &r.start, adsp->dtb_mem_size);
+ &rmem->base, adsp->dtb_mem_size);
return -EBUSY;
}
@@ -584,29 +590,28 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
static int adsp_assign_memory_region(struct qcom_adsp *adsp)
{
+ struct reserved_mem *rmem = NULL;
struct qcom_scm_vmperm perm;
struct device_node *node;
- struct resource r;
int ret;
if (!adsp->region_assign_idx)
return 0;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", adsp->region_assign_idx);
- if (!node) {
- dev_err(adsp->dev, "missing shareable memory-region\n");
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve shareable memory-region\n");
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
- if (ret)
- return ret;
-
perm.vmid = QCOM_SCM_VMID_MSS_MSA;
perm.perm = QCOM_SCM_PERM_RW;
- adsp->region_assign_phys = r.start;
- adsp->region_assign_size = resource_size(&r);
+ adsp->region_assign_phys = rmem->base;
+ adsp->region_assign_size = rmem->size;
adsp->region_assign_perms = BIT(QCOM_SCM_VMID_HLOS);
ret = qcom_scm_assign_mem(adsp->region_assign_phys,
@@ -1012,7 +1017,7 @@ static const struct adsp_data sc8180x_mpss_resource = {
.ssctl_id = 0x12,
};
-static const struct adsp_data slpi_resource_init = {
+static const struct adsp_data msm8996_slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
@@ -1026,7 +1031,7 @@ static const struct adsp_data slpi_resource_init = {
.ssctl_id = 0x16,
};
-static const struct adsp_data sm8150_slpi_resource = {
+static const struct adsp_data sdm845_slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
@@ -1042,38 +1047,6 @@ static const struct adsp_data sm8150_slpi_resource = {
.ssctl_id = 0x16,
};
-static const struct adsp_data sm8250_slpi_resource = {
- .crash_reason_smem = 424,
- .firmware_name = "slpi.mdt",
- .pas_id = 12,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "lcx",
- "lmx",
- NULL
- },
- .load_state = "slpi",
- .ssr_name = "dsps",
- .sysmon_name = "slpi",
- .ssctl_id = 0x16,
-};
-
-static const struct adsp_data sm8350_slpi_resource = {
- .crash_reason_smem = 424,
- .firmware_name = "slpi.mdt",
- .pas_id = 12,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "lcx",
- "lmx",
- NULL
- },
- .load_state = "slpi",
- .ssr_name = "dsps",
- .sysmon_name = "slpi",
- .ssctl_id = 0x16,
-};
-
static const struct adsp_data wcss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "wcnss.mdt",
@@ -1182,9 +1155,9 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
- { .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
+ { .compatible = "qcom,msm8996-slpi-pil", .data = &msm8996_slpi_resource_init},
{ .compatible = "qcom,msm8998-adsp-pas", .data = &msm8996_adsp_resource},
- { .compatible = "qcom,msm8998-slpi-pas", .data = &slpi_resource_init},
+ { .compatible = "qcom,msm8998-slpi-pas", .data = &msm8996_slpi_resource_init},
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
@@ -1199,6 +1172,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
+ { .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
{ .compatible = "qcom,sm6115-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sm6115-cdsp-pas", .data = &cdsp_resource_init},
@@ -1209,17 +1183,17 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
- { .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource},
+ { .compatible = "qcom,sm8150-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8250-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sm8250-cdsp-pas", .data = &sm8250_cdsp_resource},
- { .compatible = "qcom,sm8250-slpi-pas", .data = &sm8250_slpi_resource},
+ { .compatible = "qcom,sm8250-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8350-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8350-cdsp-pas", .data = &sm8350_cdsp_resource},
- { .compatible = "qcom,sm8350-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8350-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8350-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
- { .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8450-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8450-mpss-pas", .data = &sm8450_mpss_resource},
{ .compatible = "qcom,sm8550-adsp-pas", .data = &sm8550_adsp_resource},
{ .compatible = "qcom,sm8550-cdsp-pas", .data = &sm8550_cdsp_resource},
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index b437044aa126..cff1fa07d1de 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -837,8 +837,7 @@ static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
return -ENOMEM;
if (wcss->version == WCSS_IPQ8074) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
- wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ wcss->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
if (IS_ERR(wcss->rmb_base))
return PTR_ERR(wcss->rmb_base);
}
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index 746f56b4bafb..c24e4a882873 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -9,7 +9,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/rpmsg.h>
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 1ed0647bc962..90de22c81da9 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -14,8 +14,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -506,27 +506,25 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
+ struct reserved_mem *rmem = NULL;
struct device_node *node;
- struct resource r;
- int ret;
node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
- if (!node) {
- dev_err(wcnss->dev, "no memory-region specified\n");
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (!rmem) {
+ dev_err(wcnss->dev, "unable to resolve memory-region\n");
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret)
- return ret;
-
- wcnss->mem_phys = wcnss->mem_reloc = r.start;
- wcnss->mem_size = resource_size(&r);
+ wcnss->mem_phys = wcnss->mem_reloc = rmem->base;
+ wcnss->mem_size = rmem->size;
wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
if (!wcnss->mem_region) {
dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, wcnss->mem_size);
+ &rmem->base, wcnss->mem_size);
return -EBUSY;
}
@@ -538,7 +536,6 @@ static int wcnss_probe(struct platform_device *pdev)
const char *fw_name = WCNSS_FIRMWARE_NAME;
const struct wcnss_data *data;
struct qcom_wcnss *wcnss;
- struct resource *res;
struct rproc *rproc;
void __iomem *mmio;
int ret;
@@ -576,8 +573,7 @@ static int wcnss_probe(struct platform_device *pdev)
mutex_init(&wcnss->iris_lock);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu");
- mmio = devm_ioremap_resource(&pdev->dev, res);
+ mmio = devm_platform_ioremap_resource_byname(pdev, "pmu");
if (IS_ERR(mmio)) {
ret = PTR_ERR(mmio);
goto free_rproc;
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index 09720ddddc85..dd36fd077911 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
index 90e8769d5624..cc17e8421f65 100644
--- a/drivers/remoteproc/rcar_rproc.c
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -5,8 +5,9 @@
#include <linux/limits.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
index bc0e1603a7a3..6ede8c0c93ad 100644
--- a/drivers/remoteproc/remoteproc_coredump.c
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -32,6 +32,7 @@ void rproc_coredump_cleanup(struct rproc *rproc)
kfree(entry);
}
}
+EXPORT_SYMBOL_GPL(rproc_coredump_cleanup);
/**
* rproc_coredump_add_segment() - add segment of device memory to coredump
@@ -327,6 +328,7 @@ void rproc_coredump(struct rproc *rproc)
*/
wait_for_completion(&dump_state.dump_done);
}
+EXPORT_SYMBOL_GPL(rproc_coredump);
/**
* rproc_coredump_using_sections() - perform coredump using section headers
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index d4dbb8d1d80c..f62a82d71dfa 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -76,10 +76,6 @@ extern struct class rproc_class;
int rproc_init_sysfs(void);
void rproc_exit_sysfs(void);
-/* from remoteproc_coredump.c */
-void rproc_coredump_cleanup(struct rproc *rproc);
-void rproc_coredump(struct rproc *rproc);
-
#ifdef CONFIG_REMOTEPROC_CDEV
void rproc_init_cdev(void);
void rproc_exit_cdev(void);
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 4ed9467897e5..d17719384c16 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/st_slim_rproc.h>
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index cf073bac79f7..9d9b13530f78 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -12,9 +12,9 @@
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
@@ -921,7 +921,7 @@ static void stm32_rproc_remove(struct platform_device *pdev)
rproc_free(rproc);
}
-static int __maybe_unused stm32_rproc_suspend(struct device *dev)
+static int stm32_rproc_suspend(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct stm32_rproc *ddata = rproc->priv;
@@ -932,7 +932,7 @@ static int __maybe_unused stm32_rproc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused stm32_rproc_resume(struct device *dev)
+static int stm32_rproc_resume(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct stm32_rproc *ddata = rproc->priv;
@@ -943,16 +943,16 @@ static int __maybe_unused stm32_rproc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
- stm32_rproc_suspend, stm32_rproc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
+ stm32_rproc_suspend, stm32_rproc_resume);
static struct platform_driver stm32_rproc_driver = {
.probe = stm32_rproc_probe,
.remove_new = stm32_rproc_remove,
.driver = {
.name = "stm32-rproc",
- .pm = &stm32_rproc_pm_ops,
- .of_match_table = of_match_ptr(stm32_rproc_match),
+ .pm = pm_ptr(&stm32_rproc_pm_ops),
+ .of_match_table = stm32_rproc_match,
},
};
module_platform_driver(stm32_rproc_driver);
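The stm32_rproc PM conversion is the now-preferred idiom: DEFINE_SIMPLE_DEV_PM_OPS() plus pm_ptr() keep the callbacks referenced at compile time (so the __maybe_unused annotations can go) while letting everything be discarded when CONFIG_PM is disabled. A minimal sketch with a hypothetical foo driver:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int foo_suspend(struct device *dev)
{
	/* Quiesce the hardware. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Bring the hardware back. */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		/* Evaluates to NULL without CONFIG_PM, so the ops are dropped. */
		.pm = pm_ptr(&foo_pm_ops),
	},
};
module_platform_driver(foo_driver);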
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index ec626a37fef6..ef8415a7cd54 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 23fe44d4d7a5..ad3415a3851b 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -12,9 +12,10 @@
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
+#include <linux/of_platform.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 120dc7d2dac1..36a55f7ffa64 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 1beb40a1d3df..82d460ff4777 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -200,9 +200,15 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops;
#define GLINK_CMD_TX_DATA_CONT 12
#define GLINK_CMD_READ_NOTIF 13
#define GLINK_CMD_RX_DONE_W_REUSE 14
+#define GLINK_CMD_SIGNALS 15
#define GLINK_FEATURE_INTENTLESS BIT(1)
+#define NATIVE_DTR_SIG NATIVE_DSR_SIG
+#define NATIVE_DSR_SIG BIT(31)
+#define NATIVE_RTS_SIG NATIVE_CTS_SIG
+#define NATIVE_CTS_SIG BIT(30)
+
static void qcom_glink_rx_done_work(struct work_struct *work);
static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
@@ -221,6 +227,10 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
channel->glink = glink;
channel->name = kstrdup(name, GFP_KERNEL);
+ if (!channel->name) {
+ kfree(channel);
+ return ERR_PTR(-ENOMEM);
+ }
init_completion(&channel->open_req);
init_completion(&channel->open_ack);
@@ -1025,6 +1035,52 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
return 0;
}
+/**
+ * qcom_glink_set_flow_control() - convert a signal cmd to wire format and transmit
+ * @ept: Rpmsg endpoint for channel.
+ * @pause: Pause transmission
+ * @dst: destination address of the endpoint
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int qcom_glink_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+ struct qcom_glink *glink = channel->glink;
+ struct glink_msg msg;
+ u32 sigs = 0;
+
+ if (pause)
+ sigs |= NATIVE_DTR_SIG | NATIVE_RTS_SIG;
+
+ msg.cmd = cpu_to_le16(GLINK_CMD_SIGNALS);
+ msg.param1 = cpu_to_le16(channel->lcid);
+ msg.param2 = cpu_to_le32(sigs);
+
+ return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
+}
+
+static void qcom_glink_handle_signals(struct qcom_glink *glink,
+ unsigned int rcid, unsigned int sigs)
+{
+ struct glink_channel *channel;
+ unsigned long flags;
+ bool enable;
+
+ spin_lock_irqsave(&glink->idr_lock, flags);
+ channel = idr_find(&glink->rcids, rcid);
+ spin_unlock_irqrestore(&glink->idr_lock, flags);
+ if (!channel) {
+ dev_err(glink->dev, "signal for non-existent channel\n");
+ return;
+ }
+
+ enable = sigs & NATIVE_DSR_SIG || sigs & NATIVE_CTS_SIG;
+
+ if (channel->ept.flow_cb)
+ channel->ept.flow_cb(channel->ept.rpdev, channel->ept.priv, enable);
+}
+
void qcom_glink_native_rx(struct qcom_glink *glink)
{
struct glink_msg msg;
@@ -1086,6 +1142,10 @@ void qcom_glink_native_rx(struct qcom_glink *glink)
qcom_glink_handle_intent_req_ack(glink, param1, param2);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
+ case GLINK_CMD_SIGNALS:
+ qcom_glink_handle_signals(glink, param1, param2);
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
+ break;
default:
dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
ret = -EINVAL;
@@ -1446,6 +1506,7 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
.sendto = qcom_glink_sendto,
.trysend = qcom_glink_trysend,
.trysendto = qcom_glink_trysendto,
+ .set_flow_control = qcom_glink_set_flow_control,
};
static void qcom_glink_rpdev_release(struct device *dev)
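GLINK maps the new rpmsg flow-control hook onto its SIGNALS command: pausing asserts the DTR/RTS pair towards the remote, and incoming DSR/CTS changes are forwarded through the endpoint's flow_cb. From an in-kernel rpmsg client's point of view the interface is rpmsg_set_flow_control() plus an optional flow_cb on the endpoint; a hedged sketch follows (the client driver and its names are made up):

#include <linux/device.h>
#include <linux/rpmsg.h>

/* Invoked when the remote toggles its flow-control request towards us. */
static int foo_flow_cb(struct rpmsg_device *rpdev, void *priv, bool pause)
{
	dev_info(&rpdev->dev, "remote asks us to %s transmitting\n",
		 pause ? "pause" : "resume");
	return 0;
}

static int foo_rpmsg_probe(struct rpmsg_device *rpdev)
{
	rpdev->ept->flow_cb = foo_flow_cb;

	/* Ask the remote side to pause transmission towards this endpoint. */
	return rpmsg_set_flow_control(rpdev->ept, true, rpdev->dst);
}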
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index a271fceb16f4..09833ad05da7 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -52,6 +52,8 @@ static DEFINE_IDA(rpmsg_minor_ida);
* @readq: wait object for incoming queue
* @default_ept: set to channel default endpoint if the default endpoint should be re-used
* on device open to prevent endpoint address update.
+ * @remote_flow_restricted: to indicate if the remote has requested that flow be restricted
+ * @remote_flow_updated: to indicate if the remote flow-control state has been updated and not yet read
*/
struct rpmsg_eptdev {
struct device dev;
@@ -68,6 +70,8 @@ struct rpmsg_eptdev {
struct sk_buff_head queue;
wait_queue_head_t readq;
+ bool remote_flow_restricted;
+ bool remote_flow_updated;
};
int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
@@ -116,6 +120,18 @@ static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
return 0;
}
+static int rpmsg_ept_flow_cb(struct rpmsg_device *rpdev, void *priv, bool enable)
+{
+ struct rpmsg_eptdev *eptdev = priv;
+
+ eptdev->remote_flow_restricted = enable;
+ eptdev->remote_flow_updated = true;
+
+ wake_up_interruptible(&eptdev->readq);
+
+ return 0;
+}
+
static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
{
struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
@@ -152,6 +168,7 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
return -EINVAL;
}
+ ept->flow_cb = rpmsg_ept_flow_cb;
eptdev->ept = ept;
filp->private_data = eptdev;
mutex_unlock(&eptdev->ept_lock);
@@ -172,6 +189,7 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
eptdev->ept = NULL;
}
mutex_unlock(&eptdev->ept_lock);
+ eptdev->remote_flow_updated = false;
/* Discard all SKBs */
skb_queue_purge(&eptdev->queue);
@@ -285,6 +303,9 @@ static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
if (!skb_queue_empty(&eptdev->queue))
mask |= EPOLLIN | EPOLLRDNORM;
+ if (eptdev->remote_flow_updated)
+ mask |= EPOLLPRI;
+
mutex_lock(&eptdev->ept_lock);
mask |= rpmsg_poll(eptdev->ept, filp, wait);
mutex_unlock(&eptdev->ept_lock);
@@ -297,14 +318,35 @@ static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
{
struct rpmsg_eptdev *eptdev = fp->private_data;
- if (cmd != RPMSG_DESTROY_EPT_IOCTL)
- return -EINVAL;
+ bool set;
+ int ret;
- /* Don't allow to destroy a default endpoint. */
- if (eptdev->default_ept)
- return -EINVAL;
+ switch (cmd) {
+ case RPMSG_GET_OUTGOING_FLOWCONTROL:
+ eptdev->remote_flow_updated = false;
+ ret = put_user(eptdev->remote_flow_restricted, (int __user *)arg);
+ break;
+ case RPMSG_SET_INCOMING_FLOWCONTROL:
+ if (arg > 1) {
+ ret = -EINVAL;
+ break;
+ }
+ set = !!arg;
+ ret = rpmsg_set_flow_control(eptdev->ept, set, eptdev->chinfo.dst);
+ break;
+ case RPMSG_DESTROY_EPT_IOCTL:
+ /* Don't allow to destroy a default endpoint. */
+ if (eptdev->default_ept) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ }
- return rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
+ return ret;
}
static const struct file_operations rpmsg_eptdev_fops = {
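
[Editorial note, not part of the patch] The new RPMSG_SET_INCOMING_FLOWCONTROL / RPMSG_GET_OUTGOING_FLOWCONTROL ioctls and the EPOLLPRI notification added above are easiest to see from userspace. Below is a minimal sketch, assuming the matching ioctl definitions from the uapi <linux/rpmsg.h> header (added elsewhere in this series) and an already-created /dev/rpmsg0 endpoint; names and paths are illustrative only.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/rpmsg.h>

int main(void)
{
	struct pollfd pfd;
	int flow = 0;
	int fd;

	fd = open("/dev/rpmsg0", O_RDWR);
	if (fd < 0)
		return 1;

	/* Ask the remote side to pause transmission towards us (arg is 0 or 1). */
	if (ioctl(fd, RPMSG_SET_INCOMING_FLOWCONTROL, 1) < 0)
		perror("set incoming flow control");

	/* The driver raises EPOLLPRI when the remote updates its flow state. */
	pfd.fd = fd;
	pfd.events = POLLPRI;
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI)) {
		/* Reading the state also clears remote_flow_updated. */
		if (ioctl(fd, RPMSG_GET_OUTGOING_FLOWCONTROL, &flow) == 0)
			printf("remote flow restricted: %d\n", flow);
	}

	return 0;
}
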
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 5039df757127..32b550c91d9f 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -331,6 +331,25 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
EXPORT_SYMBOL(rpmsg_trysend_offchannel);
/**
+ * rpmsg_set_flow_control() - request remote to pause/resume transmission
+ * @ept: the rpmsg endpoint
+ * @pause: pause transmission
+ * @dst: destination address of the endpoint
+ *
+ * Return: 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
+{
+ if (WARN_ON(!ept))
+ return -EINVAL;
+ if (!ept->ops->set_flow_control)
+ return -EOPNOTSUPP;
+
+ return ept->ops->set_flow_control(ept, pause, dst);
+}
+EXPORT_SYMBOL_GPL(rpmsg_set_flow_control);
+
+/**
* rpmsg_get_mtu() - get maximum transmission buffer size for sending message.
* @ept: the rpmsg endpoint
*
@@ -539,6 +558,8 @@ static int rpmsg_dev_probe(struct device *dev)
rpdev->ept = ept;
rpdev->src = ept->addr;
+
+ ept->flow_cb = rpdrv->flowcontrol;
}
err = rpdrv->probe(rpdev);
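
[Editorial note, not part of the patch] On the kernel side, the hooks wired up above are consumed by rpmsg client drivers: a driver may provide a flowcontrol callback (copied into ept->flow_cb at probe time, as shown in the hunk above) and may itself call rpmsg_set_flow_control(). A minimal sketch under those assumptions follows; the driver, channel, and function names are invented for illustration, and an rx callback is included because the core only creates the default endpoint when one is present.

#include <linux/module.h>
#include <linux/rpmsg.h>

/* Regular rx callback; the default endpoint (and its flow_cb) is only
 * created when the driver provides one. */
static int demo_cb(struct rpmsg_device *rpdev, void *data, int len,
		   void *priv, u32 src)
{
	return 0;
}

/* Called when the remote pauses (true) or resumes (false) our transmissions. */
static int demo_flowcontrol(struct rpmsg_device *rpdev, void *priv, bool pause)
{
	dev_info(&rpdev->dev, "remote asked to %s\n", pause ? "pause" : "resume");
	return 0;
}

static int demo_probe(struct rpmsg_device *rpdev)
{
	/* Throttle the remote until this driver is ready to receive. */
	return rpmsg_set_flow_control(rpdev->ept, true, rpdev->dst);
}

static const struct rpmsg_device_id demo_id_table[] = {
	{ .name = "demo-flowctrl-channel" },
	{ },
};
MODULE_DEVICE_TABLE(rpmsg, demo_id_table);

static struct rpmsg_driver demo_driver = {
	.drv.name	= KBUILD_MODNAME,
	.id_table	= demo_id_table,
	.probe		= demo_probe,
	.callback	= demo_cb,
	.flowcontrol	= demo_flowcontrol,
};
module_rpmsg_driver(demo_driver);

MODULE_LICENSE("GPL");
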
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 39b646d0d40d..b950d6f790a3 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -55,6 +55,7 @@ struct rpmsg_device_ops {
* @trysendto: see @rpmsg_trysendto(), optional
* @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
* @poll: see @rpmsg_poll(), optional
+ * @set_flow_control: see @rpmsg_set_flow_control(), optional
* @get_mtu: see @rpmsg_get_mtu(), optional
*
* Indirection table for the operations that a rpmsg backend should implement.
@@ -75,6 +76,7 @@ struct rpmsg_endpoint_ops {
void *data, int len);
__poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
+ int (*set_flow_control)(struct rpmsg_endpoint *ept, bool pause, u32 dst);
ssize_t (*get_mtu)(struct rpmsg_endpoint *ept);
};
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 05f4b2d66290..d7502433c78a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -904,9 +904,9 @@ config RTC_DRV_PCF2127
select REGMAP_SPI if SPI_MASTER
select WATCHDOG_CORE if WATCHDOG
help
- If you say yes here you get support for the NXP PCF2127/29 RTC
+ If you say yes here you get support for the NXP PCF2127/29/31 RTC
chips with integrated quartz crystal for industrial applications.
- Both chips also have watchdog timer and tamper switch detection
+ These chips also have watchdog timer and tamper switch detection
features.
PCF2127 has an additional feature of 512 bytes battery backed
@@ -1196,6 +1196,7 @@ config RTC_DRV_MSM6242
config RTC_DRV_BQ4802
tristate "TI BQ4802"
depends on HAS_IOMEM && HAS_IOPORT
+ depends on SPARC || COMPILE_TEST
help
If you say Y here you will get support for the TI
BQ4802 RTC chip.
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 499d89150afc..1b63111cdda2 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -376,7 +376,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
err = rtc_valid_tm(&alarm->time);
done:
- if (err)
+ if (err && alarm->enabled)
dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
&alarm->time);
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index e08d3181bd2a..fde2b8054c2e 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -15,7 +15,7 @@
#include <linux/i2c.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/watchdog.h>
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index b4139c200676..569c1054d6b0 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -474,7 +473,6 @@ static const struct armada38x_rtc_data armada8k_data = {
.alarm = ALARM2,
};
-#ifdef CONFIG_OF
static const struct of_device_id armada38x_rtc_of_match_table[] = {
{
.compatible = "marvell,armada-380-rtc",
@@ -487,7 +485,6 @@ static const struct of_device_id armada38x_rtc_of_match_table[] = {
{}
};
MODULE_DEVICE_TABLE(of, armada38x_rtc_of_match_table);
-#endif
static __init int armada38x_rtc_probe(struct platform_device *pdev)
{
@@ -577,7 +574,7 @@ static struct platform_driver armada38x_rtc_driver = {
.driver = {
.name = "armada38x-rtc",
.pm = &armada38x_rtc_pm_ops,
- .of_match_table = of_match_ptr(armada38x_rtc_of_match_table),
+ .of_match_table = armada38x_rtc_of_match_table,
},
};
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
index a93352ed3aec..880b015eebaf 100644
--- a/drivers/rtc/rtc-aspeed.c
+++ b/drivers/rtc/rtc-aspeed.c
@@ -118,7 +118,7 @@ MODULE_DEVICE_TABLE(of, aspeed_rtc_match);
static struct platform_driver aspeed_rtc_driver = {
.driver = {
.name = "aspeed-rtc",
- .of_match_table = of_match_ptr(aspeed_rtc_match),
+ .of_match_table = aspeed_rtc_match,
},
};
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index e9d17232d0a8..add4f71d7b3b 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -642,7 +641,7 @@ static struct platform_driver at91_rtc_driver = {
.driver = {
.name = "at91_rtc",
.pm = &at91_rtc_pm_ops,
- .of_match_table = of_match_ptr(at91_rtc_dt_ids),
+ .of_match_table = at91_rtc_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 610f27dfc462..f93bee96e362 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -534,7 +534,7 @@ static struct platform_driver at91_rtc_driver = {
.driver = {
.name = "rtc-at91sam9",
.pm = &at91_rtc_pm_ops,
- .of_match_table = of_match_ptr(at91_rtc_dt_ids),
+ .of_match_table = at91_rtc_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c9416fe8542d..228fb2d11c70 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -913,6 +913,10 @@ static inline void cmos_check_acpi_rtc_status(struct device *dev,
#define INITSECTION __init
#endif
+#define SECS_PER_DAY (24 * 60 * 60)
+#define SECS_PER_MONTH (28 * SECS_PER_DAY)
+#define SECS_PER_YEAR (365 * SECS_PER_DAY)
+
static int INITSECTION
cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
{
@@ -1019,6 +1023,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup0;
}
+ if (cmos_rtc.mon_alrm)
+ cmos_rtc.rtc->alarm_offset_max = SECS_PER_YEAR - 1;
+ else if (cmos_rtc.day_alrm)
+ cmos_rtc.rtc->alarm_offset_max = SECS_PER_MONTH - 1;
+ else
+ cmos_rtc.rtc->alarm_offset_max = SECS_PER_DAY - 1;
+
rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
if (!mc146818_does_rtc_work()) {
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 998ab8606f0b..0cd397c04ff0 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -182,21 +182,15 @@ static int cros_ec_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM, alarm_offset);
if (ret < 0) {
- if (ret == -EINVAL && alarm_offset >= SECS_PER_DAY) {
- /*
- * RTC chips on some older Chromebooks can only handle
- * alarms up to 24h in the future. Try to set an alarm
- * below that limit to avoid suspend failures.
- */
- ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM,
- SECS_PER_DAY - 1);
- }
-
- if (ret < 0) {
- dev_err(dev, "error setting alarm in %u seconds: %d\n",
- alarm_offset, ret);
- return ret;
- }
+ dev_err(dev, "error setting alarm in %u seconds: %d\n",
+ alarm_offset, ret);
+ /*
+ * The EC code returns -EINVAL if the alarm time is too
+ * far in the future. Convert it to the expected error code.
+ */
+ if (ret == -EINVAL)
+ ret = -ERANGE;
+ return ret;
}
return 0;
@@ -355,6 +349,20 @@ static int cros_ec_rtc_probe(struct platform_device *pdev)
cros_ec_rtc->rtc->ops = &cros_ec_rtc_ops;
cros_ec_rtc->rtc->range_max = U32_MAX;
+ /*
+ * The RTC on some older Chromebooks can only handle alarms less than
+ * 24 hours in the future. The only way to find out is to try to set an
+ * alarm further in the future. If that fails, assume that the RTC
+ * connected to the EC can only handle less than 24 hours of alarm
+ * window.
+ */
+ ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM, SECS_PER_DAY * 2);
+ if (ret == -EINVAL)
+ cros_ec_rtc->rtc->alarm_offset_max = SECS_PER_DAY - 1;
+
+ (void)cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM,
+ EC_RTC_ALARM_CLEAR);
+
ret = devm_rtc_register_device(cros_ec_rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index ee2efb496174..2f5d60622564 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -496,6 +497,12 @@ static int da9063_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
irq_alarm, ret);
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq_alarm);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "Failed to set IRQ %d as a wake IRQ: %d\n",
+ irq_alarm, ret);
+
device_init_wakeup(&pdev->dev, true);
return devm_rtc_register_device(rtc->rtc_dev);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index ed9360486953..d4de401548b4 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -336,8 +336,8 @@ static int ds1305_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
/* make sure alarm fires within the next 24 hours */
if (later <= now)
return -EINVAL;
- if ((later - now) > 24 * 60 * 60)
- return -EDOM;
+ if ((later - now) > ds1305->rtc->alarm_offset_max)
+ return -ERANGE;
/* disable alarm if needed */
if (ds1305->ctrl[0] & DS1305_AEI0) {
@@ -691,6 +691,7 @@ static int ds1305_probe(struct spi_device *spi)
ds1305->rtc->ops = &ds1305_ops;
ds1305->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
ds1305->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ ds1305->rtc->alarm_offset_max = 24 * 60 * 60;
ds1305_nvmem_cfg.priv = ds1305;
status = devm_rtc_register_device(ds1305->rtc);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cb5acecc11aa..506b7d1c2397 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1744,7 +1744,7 @@ static int ds1307_probe(struct i2c_client *client)
match = device_get_match_data(&client->dev);
if (match) {
- ds1307->type = (enum ds_type)match;
+ ds1307->type = (uintptr_t)match;
chip = &chips[ds1307->type];
} else if (id) {
chip = &chips[id->driver_data];
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index a5026b0514e7..6ae8b9a294fe 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -16,7 +16,6 @@
#include <linux/jiffies.h>
#include <linux/rtc.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/module.h>
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 0480f592307e..3231fd9f61da 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -7,9 +7,8 @@
#include <linux/rtc.h>
#include <linux/types.h>
#include <linux/bcd.h>
-#include <linux/platform_data/rtc-ds2404.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -27,164 +26,140 @@
#define DS2404_CLK 1
#define DS2404_DQ 2
-struct ds2404_gpio {
- const char *name;
- unsigned int gpio;
-};
-
struct ds2404 {
- struct ds2404_gpio *gpio;
+ struct device *dev;
+ struct gpio_desc *rst_gpiod;
+ struct gpio_desc *clk_gpiod;
+ struct gpio_desc *dq_gpiod;
struct rtc_device *rtc;
};
-static struct ds2404_gpio ds2404_gpio[] = {
- { "RTC RST", 0 },
- { "RTC CLK", 0 },
- { "RTC DQ", 0 },
-};
-
-static int ds2404_gpio_map(struct ds2404 *chip, struct platform_device *pdev,
- struct ds2404_platform_data *pdata)
+static int ds2404_gpio_map(struct ds2404 *chip, struct platform_device *pdev)
{
- int i, err;
-
- ds2404_gpio[DS2404_RST].gpio = pdata->gpio_rst;
- ds2404_gpio[DS2404_CLK].gpio = pdata->gpio_clk;
- ds2404_gpio[DS2404_DQ].gpio = pdata->gpio_dq;
-
- for (i = 0; i < ARRAY_SIZE(ds2404_gpio); i++) {
- err = gpio_request(ds2404_gpio[i].gpio, ds2404_gpio[i].name);
- if (err) {
- dev_err(&pdev->dev, "error mapping gpio %s: %d\n",
- ds2404_gpio[i].name, err);
- goto err_request;
- }
- if (i != DS2404_DQ)
- gpio_direction_output(ds2404_gpio[i].gpio, 1);
- }
+ struct device *dev = &pdev->dev;
- chip->gpio = ds2404_gpio;
- return 0;
+ /* This will de-assert RESET, declare this GPIO as GPIOD_ACTIVE_LOW */
+ chip->rst_gpiod = devm_gpiod_get(dev, "rst", GPIOD_OUT_LOW);
+ if (IS_ERR(chip->rst_gpiod))
+ return PTR_ERR(chip->rst_gpiod);
-err_request:
- while (--i >= 0)
- gpio_free(ds2404_gpio[i].gpio);
- return err;
-}
+ chip->clk_gpiod = devm_gpiod_get(dev, "clk", GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->clk_gpiod))
+ return PTR_ERR(chip->clk_gpiod);
-static void ds2404_gpio_unmap(void *data)
-{
- int i;
+ chip->dq_gpiod = devm_gpiod_get(dev, "dq", GPIOD_ASIS);
+ if (IS_ERR(chip->dq_gpiod))
+ return PTR_ERR(chip->dq_gpiod);
- for (i = 0; i < ARRAY_SIZE(ds2404_gpio); i++)
- gpio_free(ds2404_gpio[i].gpio);
+ return 0;
}
-static void ds2404_reset(struct device *dev)
+static void ds2404_reset(struct ds2404 *chip)
{
- gpio_set_value(ds2404_gpio[DS2404_RST].gpio, 0);
+ gpiod_set_value(chip->rst_gpiod, 1);
udelay(1000);
- gpio_set_value(ds2404_gpio[DS2404_RST].gpio, 1);
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 0);
- gpio_direction_output(ds2404_gpio[DS2404_DQ].gpio, 0);
+ gpiod_set_value(chip->rst_gpiod, 0);
+ gpiod_set_value(chip->clk_gpiod, 0);
+ gpiod_direction_output(chip->dq_gpiod, 0);
udelay(10);
}
-static void ds2404_write_byte(struct device *dev, u8 byte)
+static void ds2404_write_byte(struct ds2404 *chip, u8 byte)
{
int i;
- gpio_direction_output(ds2404_gpio[DS2404_DQ].gpio, 1);
+ gpiod_direction_output(chip->dq_gpiod, 1);
for (i = 0; i < 8; i++) {
- gpio_set_value(ds2404_gpio[DS2404_DQ].gpio, byte & (1 << i));
+ gpiod_set_value(chip->dq_gpiod, byte & (1 << i));
udelay(10);
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 1);
+ gpiod_set_value(chip->clk_gpiod, 1);
udelay(10);
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 0);
+ gpiod_set_value(chip->clk_gpiod, 0);
udelay(10);
}
}
-static u8 ds2404_read_byte(struct device *dev)
+static u8 ds2404_read_byte(struct ds2404 *chip)
{
int i;
u8 ret = 0;
- gpio_direction_input(ds2404_gpio[DS2404_DQ].gpio);
+ gpiod_direction_input(chip->dq_gpiod);
for (i = 0; i < 8; i++) {
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 0);
+ gpiod_set_value(chip->clk_gpiod, 0);
udelay(10);
- if (gpio_get_value(ds2404_gpio[DS2404_DQ].gpio))
+ if (gpiod_get_value(chip->dq_gpiod))
ret |= 1 << i;
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 1);
+ gpiod_set_value(chip->clk_gpiod, 1);
udelay(10);
}
return ret;
}
-static void ds2404_read_memory(struct device *dev, u16 offset,
+static void ds2404_read_memory(struct ds2404 *chip, u16 offset,
int length, u8 *out)
{
- ds2404_reset(dev);
- ds2404_write_byte(dev, DS2404_READ_MEMORY_CMD);
- ds2404_write_byte(dev, offset & 0xff);
- ds2404_write_byte(dev, (offset >> 8) & 0xff);
+ ds2404_reset(chip);
+ ds2404_write_byte(chip, DS2404_READ_MEMORY_CMD);
+ ds2404_write_byte(chip, offset & 0xff);
+ ds2404_write_byte(chip, (offset >> 8) & 0xff);
while (length--)
- *out++ = ds2404_read_byte(dev);
+ *out++ = ds2404_read_byte(chip);
}
-static void ds2404_write_memory(struct device *dev, u16 offset,
+static void ds2404_write_memory(struct ds2404 *chip, u16 offset,
int length, u8 *out)
{
int i;
u8 ta01, ta02, es;
- ds2404_reset(dev);
- ds2404_write_byte(dev, DS2404_WRITE_SCRATCHPAD_CMD);
- ds2404_write_byte(dev, offset & 0xff);
- ds2404_write_byte(dev, (offset >> 8) & 0xff);
+ ds2404_reset(chip);
+ ds2404_write_byte(chip, DS2404_WRITE_SCRATCHPAD_CMD);
+ ds2404_write_byte(chip, offset & 0xff);
+ ds2404_write_byte(chip, (offset >> 8) & 0xff);
for (i = 0; i < length; i++)
- ds2404_write_byte(dev, out[i]);
+ ds2404_write_byte(chip, out[i]);
- ds2404_reset(dev);
- ds2404_write_byte(dev, DS2404_READ_SCRATCHPAD_CMD);
+ ds2404_reset(chip);
+ ds2404_write_byte(chip, DS2404_READ_SCRATCHPAD_CMD);
- ta01 = ds2404_read_byte(dev);
- ta02 = ds2404_read_byte(dev);
- es = ds2404_read_byte(dev);
+ ta01 = ds2404_read_byte(chip);
+ ta02 = ds2404_read_byte(chip);
+ es = ds2404_read_byte(chip);
for (i = 0; i < length; i++) {
- if (out[i] != ds2404_read_byte(dev)) {
- dev_err(dev, "read invalid data\n");
+ if (out[i] != ds2404_read_byte(chip)) {
+ dev_err(chip->dev, "read invalid data\n");
return;
}
}
- ds2404_reset(dev);
- ds2404_write_byte(dev, DS2404_COPY_SCRATCHPAD_CMD);
- ds2404_write_byte(dev, ta01);
- ds2404_write_byte(dev, ta02);
- ds2404_write_byte(dev, es);
+ ds2404_reset(chip);
+ ds2404_write_byte(chip, DS2404_COPY_SCRATCHPAD_CMD);
+ ds2404_write_byte(chip, ta01);
+ ds2404_write_byte(chip, ta02);
+ ds2404_write_byte(chip, es);
- gpio_direction_input(ds2404_gpio[DS2404_DQ].gpio);
- while (gpio_get_value(ds2404_gpio[DS2404_DQ].gpio))
+ while (gpiod_get_value(chip->dq_gpiod))
;
}
-static void ds2404_enable_osc(struct device *dev)
+static void ds2404_enable_osc(struct ds2404 *chip)
{
u8 in[1] = { 0x10 }; /* enable oscillator */
- ds2404_write_memory(dev, 0x201, 1, in);
+
+ ds2404_write_memory(chip, 0x201, 1, in);
}
static int ds2404_read_time(struct device *dev, struct rtc_time *dt)
{
+ struct ds2404 *chip = dev_get_drvdata(dev);
unsigned long time = 0;
__le32 hw_time = 0;
- ds2404_read_memory(dev, 0x203, 4, (u8 *)&hw_time);
+ ds2404_read_memory(chip, 0x203, 4, (u8 *)&hw_time);
time = le32_to_cpu(hw_time);
rtc_time64_to_tm(time, dt);
@@ -193,8 +168,9 @@ static int ds2404_read_time(struct device *dev, struct rtc_time *dt)
static int ds2404_set_time(struct device *dev, struct rtc_time *dt)
{
+ struct ds2404 *chip = dev_get_drvdata(dev);
u32 time = cpu_to_le32(rtc_tm_to_time64(dt));
- ds2404_write_memory(dev, 0x203, 4, (u8 *)&time);
+ ds2404_write_memory(chip, 0x203, 4, (u8 *)&time);
return 0;
}
@@ -205,7 +181,6 @@ static const struct rtc_class_ops ds2404_rtc_ops = {
static int rtc_probe(struct platform_device *pdev)
{
- struct ds2404_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct ds2404 *chip;
int retval = -EBUSY;
@@ -213,22 +188,16 @@ static int rtc_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
+ chip->dev = &pdev->dev;
+
chip->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
- retval = ds2404_gpio_map(chip, pdev, pdata);
+ retval = ds2404_gpio_map(chip, pdev);
if (retval)
return retval;
- retval = devm_add_action_or_reset(&pdev->dev, ds2404_gpio_unmap, chip);
- if (retval)
- return retval;
-
- dev_info(&pdev->dev, "using GPIOs RST:%d, CLK:%d, DQ:%d\n",
- chip->gpio[DS2404_RST].gpio, chip->gpio[DS2404_CLK].gpio,
- chip->gpio[DS2404_DQ].gpio);
-
platform_set_drvdata(pdev, chip);
chip->rtc->ops = &ds2404_rtc_ops;
@@ -238,7 +207,7 @@ static int rtc_probe(struct platform_device *pdev)
if (retval)
return retval;
- ds2404_enable_osc(&pdev->dev);
+ ds2404_enable_osc(chip);
return 0;
}
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 3d7c4077fe1c..a72c4ad0cec6 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -11,11 +11,8 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/fsl/ftm.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index a613257d1574..4eef7afcc8bc 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -9,6 +9,8 @@
*/
#include <linux/bcd.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
@@ -31,6 +33,8 @@
#define ISL12022_REG_SR 0x07
#define ISL12022_REG_INT 0x08
+#define ISL12022_REG_PWR_VBAT 0x0a
+
#define ISL12022_REG_BETA 0x0d
#define ISL12022_REG_TEMP_L 0x28
@@ -41,6 +45,12 @@
#define ISL12022_SR_LBAT75 (1 << 1)
#define ISL12022_INT_WRTC (1 << 6)
+#define ISL12022_INT_FO_MASK GENMASK(3, 0)
+#define ISL12022_INT_FO_OFF 0x0
+#define ISL12022_INT_FO_32K 0x1
+
+#define ISL12022_REG_VB85_MASK GENMASK(5, 3)
+#define ISL12022_REG_VB75_MASK GENMASK(2, 0)
#define ISL12022_BETA_TSE (1 << 7)
@@ -141,12 +151,6 @@ static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (ret)
return ret;
- if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) {
- dev_warn(dev,
- "voltage dropped below %u%%, date and time is not reliable.\n",
- buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 85 : 75);
- }
-
dev_dbg(dev,
"raw data is sec=%02x, min=%02x, hr=%02x, mday=%02x, mon=%02x, year=%02x, wday=%02x, sr=%02x, int=%02x",
buf[ISL12022_REG_SC],
@@ -204,7 +208,34 @@ static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
return regmap_bulk_write(regmap, ISL12022_REG_SC, buf, sizeof(buf));
}
+static int isl12022_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u32 user, val;
+ int ret;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ ret = regmap_read(regmap, ISL12022_REG_SR, &val);
+ if (ret)
+ return ret;
+
+ user = 0;
+ if (val & ISL12022_SR_LBAT85)
+ user |= RTC_VL_BACKUP_LOW;
+
+ if (val & ISL12022_SR_LBAT75)
+ user |= RTC_VL_BACKUP_EMPTY;
+
+ return put_user(user, (u32 __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
static const struct rtc_class_ops isl12022_rtc_ops = {
+ .ioctl = isl12022_rtc_ioctl,
.read_time = isl12022_rtc_read_time,
.set_time = isl12022_rtc_set_time,
};
@@ -215,10 +246,88 @@ static const struct regmap_config regmap_config = {
.use_single_write = true,
};
+static int isl12022_register_clock(struct device *dev)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ struct clk_hw *hw;
+ int ret;
+
+ if (!device_property_present(dev, "#clock-cells")) {
+ /*
+ * Disabling the F_OUT pin reduces the power
+ * consumption in battery mode by ~25%.
+ */
+ regmap_update_bits(regmap, ISL12022_REG_INT, ISL12022_INT_FO_MASK,
+ ISL12022_INT_FO_OFF);
+
+ return 0;
+ }
+
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
+
+ /*
+ * For now, only support a fixed clock of 32768Hz (the reset default).
+ */
+ ret = regmap_update_bits(regmap, ISL12022_REG_INT,
+ ISL12022_INT_FO_MASK, ISL12022_INT_FO_32K);
+ if (ret)
+ return ret;
+
+ hw = devm_clk_hw_register_fixed_rate(dev, "isl12022", NULL, 0, 32768);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
+static const u32 trip_levels[2][7] = {
+ { 2125000, 2295000, 2550000, 2805000, 3060000, 4250000, 4675000 },
+ { 1875000, 2025000, 2250000, 2475000, 2700000, 3750000, 4125000 },
+};
+
+static void isl12022_set_trip_levels(struct device *dev)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u32 levels[2] = {0, 0};
+ int ret, i, j, x[2];
+ u8 val, mask;
+
+ device_property_read_u32_array(dev, "isil,battery-trip-levels-microvolt",
+ levels, 2);
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < ARRAY_SIZE(trip_levels[i]) - 1; j++) {
+ if (levels[i] <= trip_levels[i][j])
+ break;
+ }
+ x[i] = j;
+ }
+
+ val = FIELD_PREP(ISL12022_REG_VB85_MASK, x[0]) |
+ FIELD_PREP(ISL12022_REG_VB75_MASK, x[1]);
+ mask = ISL12022_REG_VB85_MASK | ISL12022_REG_VB75_MASK;
+
+ ret = regmap_update_bits(regmap, ISL12022_REG_PWR_VBAT, mask, val);
+ if (ret)
+ dev_warn(dev, "unable to set battery alarm levels: %d\n", ret);
+
+ /*
+ * Force a write of the TSE bit in the BETA register, in order
+ * to trigger an update of the LBAT75 and LBAT85 bits in the
+ * status register. In battery backup mode, those bits have
+ * another meaning, so without this, they may contain stale
+ * values for up to a minute after power-on.
+ */
+ regmap_write_bits(regmap, ISL12022_REG_BETA,
+ ISL12022_BETA_TSE, ISL12022_BETA_TSE);
+}
+
static int isl12022_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
struct regmap *regmap;
+ int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -231,6 +340,11 @@ static int isl12022_probe(struct i2c_client *client)
dev_set_drvdata(&client->dev, regmap);
+ ret = isl12022_register_clock(&client->dev);
+ if (ret)
+ return ret;
+
+ isl12022_set_trip_levels(&client->dev);
isl12022_hwmon_register(&client->dev);
rtc = devm_rtc_allocate_device(&client->dev);
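
[Editorial note, not part of the patch] With the LBAT85/LBAT75 warning removed from read_time and exposed through the new RTC_VL_READ ioctl handler above, userspace queries the backup-battery state explicitly. A minimal sketch, assuming a /dev/rtc0 node bound to this driver:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned int vl = 0;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_VL_READ, &vl) < 0)
		return 1;

	if (vl & RTC_VL_BACKUP_LOW)	/* driver maps LBAT85 to this flag */
		printf("backup voltage low, replace battery soon\n");
	if (vl & RTC_VL_BACKUP_EMPTY)	/* driver maps LBAT75 to this flag */
		printf("backup voltage critical, time may be unreliable\n");

	return 0;
}
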
diff --git a/drivers/rtc/rtc-isl12026.c b/drivers/rtc/rtc-isl12026.c
index 5abff5d348ac..2aabb9151d4c 100644
--- a/drivers/rtc/rtc-isl12026.c
+++ b/drivers/rtc/rtc-isl12026.c
@@ -11,7 +11,6 @@
#include <linux/mutex.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -429,7 +428,7 @@ static void isl12026_force_power_modes(struct i2c_client *client)
}
}
-static int isl12026_probe_new(struct i2c_client *client)
+static int isl12026_probe(struct i2c_client *client)
{
struct isl12026 *priv;
int ret;
@@ -490,7 +489,7 @@ static struct i2c_driver isl12026_driver = {
.name = "rtc-isl12026",
.of_match_table = isl12026_dt_match,
},
- .probe = isl12026_probe_new,
+ .probe = isl12026_probe,
.remove = isl12026_remove,
};
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index b0712b4e3648..e50c23ee1646 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -9,7 +9,7 @@
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/rtc.h>
@@ -188,7 +188,7 @@ isl1208_i2c_validate_client(struct i2c_client *client)
static int isl1208_set_xtoscb(struct i2c_client *client, int sr, int xtosb_val)
{
/* Do nothing if bit is already set to desired value */
- if ((sr & ISL1208_REG_SR_XTOSCB) == xtosb_val)
+ if (!!(sr & ISL1208_REG_SR_XTOSCB) == xtosb_val)
return 0;
if (xtosb_val)
@@ -862,17 +862,9 @@ isl1208_probe(struct i2c_client *client)
i2c_set_clientdata(client, isl1208);
/* Determine which chip we have */
- if (client->dev.of_node) {
- isl1208->config = of_device_get_match_data(&client->dev);
- if (!isl1208->config)
- return -ENODEV;
- } else {
- const struct i2c_device_id *id = i2c_match_id(isl1208_id, client);
-
- if (!id)
- return -ENODEV;
- isl1208->config = (struct isl1208_config *)id->driver_data;
- }
+ isl1208->config = i2c_get_match_data(client);
+ if (!isl1208->config)
+ return -ENODEV;
rc = isl1208_clk_present(client, "xin");
if (rc < 0)
@@ -952,7 +944,6 @@ isl1208_probe(struct i2c_client *client)
rc = isl1208_setup_irq(client, client->irq);
if (rc)
return rc;
-
} else {
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, isl1208->rtc->features);
}
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 36453b008139..bafa7d1b9b88 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -11,7 +11,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/property.h>
@@ -349,7 +349,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- rtc->type = (enum jz4740_rtc_type)device_get_match_data(dev);
+ rtc->type = (uintptr_t)device_get_match_data(dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/rtc/rtc-lpc24xx.c b/drivers/rtc/rtc-lpc24xx.c
index a4612e543f35..df17c48ff086 100644
--- a/drivers/rtc/rtc-lpc24xx.c
+++ b/drivers/rtc/rtc-lpc24xx.c
@@ -9,9 +9,8 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 3cc5151e0986..866489ad56d6 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -17,7 +17,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/mutex.h>
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 481c9525b1dd..dd4a62e2d39c 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/bcd.h>
@@ -269,9 +270,16 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id m48t86_rtc_of_ids[] = {
+ { .compatible = "st,m48t86" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, m48t86_rtc_of_ids);
+
static struct platform_driver m48t86_rtc_platform_driver = {
.driver = {
.name = "rtc-m48t86",
+ .of_match_table = m48t86_rtc_of_ids,
},
.probe = m48t86_rtc_probe,
};
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 07df43e4c4d0..28858fcaea8f 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -11,10 +11,8 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 1d297af80f87..1617063669cc 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -9,7 +9,7 @@
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
index 81857a457c32..094c649fc137 100644
--- a/drivers/rtc/rtc-mt7622.c
+++ b/drivers/rtc/rtc-mt7622.c
@@ -7,9 +7,9 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 762cf03345f1..dbb935dbbd8a 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -11,7 +11,6 @@
#include <linux/pm_wakeirq.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#define RTC_INPUT_CLK_32768HZ (0x00 << 5)
#define RTC_INPUT_CLK_32000HZ (0x01 << 5)
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
index a4e3f924837e..ed4e606be8e5 100644
--- a/drivers/rtc/rtc-nct3018y.c
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -538,7 +538,7 @@ MODULE_DEVICE_TABLE(of, nct3018y_of_match);
static struct i2c_driver nct3018y_driver = {
.driver = {
.name = "rtc-nct3018y",
- .of_match_table = of_match_ptr(nct3018y_of_match),
+ .of_match_table = nct3018y_of_match,
},
.probe = nct3018y_probe,
.id_table = nct3018y_id,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 8ae4d7824ec9..5b10ab06cd2e 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -747,12 +747,12 @@ static int omap_rtc_probe(struct platform_device *pdev)
}
rtc->irq_timer = platform_get_irq(pdev, 0);
- if (rtc->irq_timer <= 0)
- return -ENOENT;
+ if (rtc->irq_timer < 0)
+ return rtc->irq_timer;
rtc->irq_alarm = platform_get_irq(pdev, 1);
- if (rtc->irq_alarm <= 0)
- return -ENOENT;
+ if (rtc->irq_alarm < 0)
+ return rtc->irq_alarm;
rtc->clk = devm_clk_get(&pdev->dev, "ext-clk");
if (!IS_ERR(rtc->clk))
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index ee03b04b74ba..9c04c4e1a49c 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * An I2C and SPI driver for the NXP PCF2127/29 RTC
+ * An I2C and SPI driver for the NXP PCF2127/29/31 RTC
* Copyright 2013 Til-Technologies
*
* Author: Renaud Cerrato <r.cerrato@til-technologies.fr>
@@ -8,9 +8,13 @@
* Watchdog and tamper functions
* Author: Bruno Thomsen <bruno.thomsen@gmail.com>
*
+ * PCF2131 support
+ * Author: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+ *
* based on the other drivers in this same directory.
*
- * Datasheet: https://www.nxp.com/docs/en/data-sheet/PCF2127.pdf
+ * Datasheets: https://www.nxp.com/docs/en/data-sheet/PCF2127.pdf
+ * https://www.nxp.com/docs/en/data-sheet/PCF2131DS.pdf
*/
#include <linux/i2c.h>
@@ -21,6 +25,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
@@ -28,6 +33,7 @@
#define PCF2127_REG_CTRL1 0x00
#define PCF2127_BIT_CTRL1_POR_OVRD BIT(3)
#define PCF2127_BIT_CTRL1_TSF1 BIT(4)
+#define PCF2127_BIT_CTRL1_STOP BIT(5)
/* Control register 2 */
#define PCF2127_REG_CTRL2 0x01
#define PCF2127_BIT_CTRL2_AIE BIT(1)
@@ -43,20 +49,10 @@
#define PCF2127_BIT_CTRL3_BF BIT(3)
#define PCF2127_BIT_CTRL3_BTSE BIT(4)
/* Time and date registers */
-#define PCF2127_REG_SC 0x03
+#define PCF2127_REG_TIME_BASE 0x03
#define PCF2127_BIT_SC_OSF BIT(7)
-#define PCF2127_REG_MN 0x04
-#define PCF2127_REG_HR 0x05
-#define PCF2127_REG_DM 0x06
-#define PCF2127_REG_DW 0x07
-#define PCF2127_REG_MO 0x08
-#define PCF2127_REG_YR 0x09
/* Alarm registers */
-#define PCF2127_REG_ALARM_SC 0x0A
-#define PCF2127_REG_ALARM_MN 0x0B
-#define PCF2127_REG_ALARM_HR 0x0C
-#define PCF2127_REG_ALARM_DM 0x0D
-#define PCF2127_REG_ALARM_DW 0x0E
+#define PCF2127_REG_ALARM_BASE 0x0A
#define PCF2127_BIT_ALARM_AE BIT(7)
/* CLKOUT control register */
#define PCF2127_REG_CLKOUT 0x0f
@@ -68,21 +64,15 @@
#define PCF2127_BIT_WD_CTL_CD0 BIT(6)
#define PCF2127_BIT_WD_CTL_CD1 BIT(7)
#define PCF2127_REG_WD_VAL 0x11
-/* Tamper timestamp registers */
-#define PCF2127_REG_TS_CTRL 0x12
+/* Tamper timestamp1 registers */
+#define PCF2127_REG_TS1_BASE 0x12
#define PCF2127_BIT_TS_CTRL_TSOFF BIT(6)
#define PCF2127_BIT_TS_CTRL_TSM BIT(7)
-#define PCF2127_REG_TS_SC 0x13
-#define PCF2127_REG_TS_MN 0x14
-#define PCF2127_REG_TS_HR 0x15
-#define PCF2127_REG_TS_DM 0x16
-#define PCF2127_REG_TS_MO 0x17
-#define PCF2127_REG_TS_YR 0x18
/*
* RAM registers
* PCF2127 has 512 bytes general-purpose static RAM (SRAM) that is
* battery backed and can survive a power outage.
- * PCF2129 doesn't have this feature.
+ * PCF2129/31 don't have this feature.
*/
#define PCF2127_REG_RAM_ADDR_MSB 0x1A
#define PCF2127_REG_RAM_WRT_CMD 0x1C
@@ -90,9 +80,14 @@
/* Watchdog timer value constants */
#define PCF2127_WD_VAL_STOP 0
-#define PCF2127_WD_VAL_MIN 2
-#define PCF2127_WD_VAL_MAX 255
-#define PCF2127_WD_VAL_DEFAULT 60
+/* PCF2127/29 watchdog timer value constants */
+#define PCF2127_WD_CLOCK_HZ_X1000 1000 /* 1Hz */
+#define PCF2127_WD_MIN_HW_HEARTBEAT_MS 500
+/* PCF2131 watchdog timer value constants */
+#define PCF2131_WD_CLOCK_HZ_X1000 250 /* 1/4Hz */
+#define PCF2131_WD_MIN_HW_HEARTBEAT_MS 4000
+
+#define PCF2127_WD_DEFAULT_TIMEOUT_S 60
/* Mask for currently enabled interrupts */
#define PCF2127_CTRL1_IRQ_MASK (PCF2127_BIT_CTRL1_TSF1)
@@ -101,13 +96,117 @@
PCF2127_BIT_CTRL2_WDTF | \
PCF2127_BIT_CTRL2_TSF2)
+#define PCF2127_MAX_TS_SUPPORTED 4
+
+/* Control register 4 */
+#define PCF2131_REG_CTRL4 0x03
+#define PCF2131_BIT_CTRL4_TSF4 BIT(4)
+#define PCF2131_BIT_CTRL4_TSF3 BIT(5)
+#define PCF2131_BIT_CTRL4_TSF2 BIT(6)
+#define PCF2131_BIT_CTRL4_TSF1 BIT(7)
+/* Control register 5 */
+#define PCF2131_REG_CTRL5 0x04
+#define PCF2131_BIT_CTRL5_TSIE4 BIT(4)
+#define PCF2131_BIT_CTRL5_TSIE3 BIT(5)
+#define PCF2131_BIT_CTRL5_TSIE2 BIT(6)
+#define PCF2131_BIT_CTRL5_TSIE1 BIT(7)
+/* Software reset register */
+#define PCF2131_REG_SR_RESET 0x05
+#define PCF2131_SR_RESET_READ_PATTERN (BIT(2) | BIT(5))
+#define PCF2131_SR_RESET_CPR_CMD (PCF2131_SR_RESET_READ_PATTERN | BIT(7))
+/* Time and date registers */
+#define PCF2131_REG_TIME_BASE 0x07
+/* Alarm registers */
+#define PCF2131_REG_ALARM_BASE 0x0E
+/* CLKOUT control register */
+#define PCF2131_REG_CLKOUT 0x13
+/* Watchdog registers */
+#define PCF2131_REG_WD_CTL 0x35
+#define PCF2131_REG_WD_VAL 0x36
+/* Tamper timestamp1 registers */
+#define PCF2131_REG_TS1_BASE 0x14
+/* Tamper timestamp2 registers */
+#define PCF2131_REG_TS2_BASE 0x1B
+/* Tamper timestamp3 registers */
+#define PCF2131_REG_TS3_BASE 0x22
+/* Tamper timestamp4 registers */
+#define PCF2131_REG_TS4_BASE 0x29
+/* Interrupt mask registers */
+#define PCF2131_REG_INT_A_MASK1 0x31
+#define PCF2131_REG_INT_A_MASK2 0x32
+#define PCF2131_REG_INT_B_MASK1 0x33
+#define PCF2131_REG_INT_B_MASK2 0x34
+#define PCF2131_BIT_INT_BLIE BIT(0)
+#define PCF2131_BIT_INT_BIE BIT(1)
+#define PCF2131_BIT_INT_AIE BIT(2)
+#define PCF2131_BIT_INT_WD_CD BIT(3)
+#define PCF2131_BIT_INT_SI BIT(4)
+#define PCF2131_BIT_INT_MI BIT(5)
+#define PCF2131_CTRL2_IRQ_MASK ( \
+ PCF2127_BIT_CTRL2_AF | \
+ PCF2127_BIT_CTRL2_WDTF)
+#define PCF2131_CTRL4_IRQ_MASK ( \
+ PCF2131_BIT_CTRL4_TSF4 | \
+ PCF2131_BIT_CTRL4_TSF3 | \
+ PCF2131_BIT_CTRL4_TSF2 | \
+ PCF2131_BIT_CTRL4_TSF1)
+
+enum pcf21xx_type {
+ PCF2127,
+ PCF2129,
+ PCF2131,
+ PCF21XX_LAST_ID
+};
+
+struct pcf21xx_ts_config {
+ u8 reg_base; /* Base register to read timestamp values. */
+
+ /*
+ * If the TS input pin is driven to GND, an interrupt can be generated
+ * (supported by all variants).
+ */
+ u8 gnd_detect_reg; /* Interrupt control register address. */
+ u8 gnd_detect_bit; /* Interrupt bit. */
+
+ /*
+ * If the TS input pin is driven to an intermediate level between GND
+ * and supply, an interrupt can be generated (optional feature depending
+ * on variant).
+ */
+ u8 inter_detect_reg; /* Interrupt control register address. */
+ u8 inter_detect_bit; /* Interrupt bit. */
+
+ u8 ie_reg; /* Interrupt enable control register. */
+ u8 ie_bit; /* Interrupt enable bit. */
+};
+
+struct pcf21xx_config {
+ int type; /* IC variant */
+ int max_register;
+ unsigned int has_nvmem:1;
+ unsigned int has_bit_wd_ctl_cd0:1;
+ unsigned int wd_val_reg_readable:1; /* If watchdog value register can be read. */
+ unsigned int has_int_a_b:1; /* PCF2131 supports two interrupt outputs. */
+ u8 reg_time_base; /* Time/date base register. */
+ u8 regs_alarm_base; /* Alarm function base registers. */
+ u8 reg_wd_ctl; /* Watchdog control register. */
+ u8 reg_wd_val; /* Watchdog value register. */
+ u8 reg_clkout; /* Clkout register. */
+ int wdd_clock_hz_x1000; /* Watchdog clock in Hz multiplied by 1000 */
+ int wdd_min_hw_heartbeat_ms;
+ unsigned int ts_count;
+ struct pcf21xx_ts_config ts[PCF2127_MAX_TS_SUPPORTED];
+ struct attribute_group attribute_group;
+};
+
struct pcf2127 {
struct rtc_device *rtc;
struct watchdog_device wdd;
struct regmap *regmap;
- time64_t ts;
- bool ts_valid;
+ const struct pcf21xx_config *cfg;
bool irq_enabled;
+ time64_t ts[PCF2127_MAX_TS_SUPPORTED]; /* Timestamp values. */
+ bool ts_valid[PCF2127_MAX_TS_SUPPORTED]; /* Timestamp valid indication. */
};
/*
@@ -117,27 +216,22 @@ struct pcf2127 {
static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- unsigned char buf[10];
+ unsigned char buf[7];
int ret;
/*
* Avoid reading CTRL2 register as it causes WD_VAL register
* value to reset to 0 which means watchdog is stopped.
*/
- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL3,
- (buf + PCF2127_REG_CTRL3),
- ARRAY_SIZE(buf) - PCF2127_REG_CTRL3);
+ ret = regmap_bulk_read(pcf2127->regmap, pcf2127->cfg->reg_time_base,
+ buf, sizeof(buf));
if (ret) {
dev_err(dev, "%s: read error\n", __func__);
return ret;
}
- if (buf[PCF2127_REG_CTRL3] & PCF2127_BIT_CTRL3_BLF)
- dev_info(dev,
- "low voltage detected, check/replace RTC battery.\n");
-
/* Clock integrity is not guaranteed when OSF flag is set. */
- if (buf[PCF2127_REG_SC] & PCF2127_BIT_SC_OSF) {
+ if (buf[0] & PCF2127_BIT_SC_OSF) {
/*
* no need clear the flag here,
* it will be cleared once the new date is saved
@@ -148,20 +242,17 @@ static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
}
dev_dbg(dev,
- "%s: raw data is cr3=%02x, sec=%02x, min=%02x, hr=%02x, "
+ "%s: raw data is sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, wday=%02x, mon=%02x, year=%02x\n",
- __func__, buf[PCF2127_REG_CTRL3], buf[PCF2127_REG_SC],
- buf[PCF2127_REG_MN], buf[PCF2127_REG_HR],
- buf[PCF2127_REG_DM], buf[PCF2127_REG_DW],
- buf[PCF2127_REG_MO], buf[PCF2127_REG_YR]);
-
- tm->tm_sec = bcd2bin(buf[PCF2127_REG_SC] & 0x7F);
- tm->tm_min = bcd2bin(buf[PCF2127_REG_MN] & 0x7F);
- tm->tm_hour = bcd2bin(buf[PCF2127_REG_HR] & 0x3F); /* rtc hr 0-23 */
- tm->tm_mday = bcd2bin(buf[PCF2127_REG_DM] & 0x3F);
- tm->tm_wday = buf[PCF2127_REG_DW] & 0x07;
- tm->tm_mon = bcd2bin(buf[PCF2127_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
- tm->tm_year = bcd2bin(buf[PCF2127_REG_YR]);
+ __func__, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+
+ tm->tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->tm_min = bcd2bin(buf[1] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[2] & 0x3F);
+ tm->tm_mday = bcd2bin(buf[3] & 0x3F);
+ tm->tm_wday = buf[4] & 0x07;
+ tm->tm_mon = bcd2bin(buf[5] & 0x1F) - 1;
+ tm->tm_year = bcd2bin(buf[6]);
tm->tm_year += 100;
dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
@@ -198,14 +289,45 @@ static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* year */
buf[i++] = bin2bcd(tm->tm_year - 100);
- /* write register's data */
- err = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_SC, buf, i);
+ /* Write access to time registers:
+ * PCF2127/29: no special action required.
+ * PCF2131: requires setting the STOP and CPR bits. STOP bit needs to
+ * be cleared after time registers are updated.
+ */
+ if (pcf2127->cfg->type == PCF2131) {
+ err = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
+ PCF2127_BIT_CTRL1_STOP,
+ PCF2127_BIT_CTRL1_STOP);
+ if (err) {
+ dev_dbg(dev, "setting STOP bit failed\n");
+ return err;
+ }
+
+ err = regmap_write(pcf2127->regmap, PCF2131_REG_SR_RESET,
+ PCF2131_SR_RESET_CPR_CMD);
+ if (err) {
+ dev_dbg(dev, "sending CPR cmd failed\n");
+ return err;
+ }
+ }
+
+ /* write time register's data */
+ err = regmap_bulk_write(pcf2127->regmap, pcf2127->cfg->reg_time_base, buf, i);
if (err) {
- dev_err(dev,
- "%s: err=%d", __func__, err);
+ dev_dbg(dev, "%s: err=%d", __func__, err);
return err;
}
+ if (pcf2127->cfg->type == PCF2131) {
+ /* Clear STOP bit (PCF2131 only) after write is completed. */
+ err = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
+ PCF2127_BIT_CTRL1_STOP, 0);
+ if (err) {
+ dev_dbg(dev, "clearing STOP bit failed\n");
+ return err;
+ }
+ }
+
return 0;
}
@@ -275,9 +397,16 @@ static int pcf2127_nvmem_write(void *priv, unsigned int offset,
static int pcf2127_wdt_ping(struct watchdog_device *wdd)
{
+ int wd_val;
struct pcf2127 *pcf2127 = watchdog_get_drvdata(wdd);
- return regmap_write(pcf2127->regmap, PCF2127_REG_WD_VAL, wdd->timeout);
+ /*
+ * Compute counter value of WATCHDG_TIM_VAL to obtain desired period
+ * in seconds, depending on the source clock frequency.
+ */
+ wd_val = ((wdd->timeout * pcf2127->cfg->wdd_clock_hz_x1000) / 1000) + 1;
+
+ return regmap_write(pcf2127->regmap, pcf2127->cfg->reg_wd_val, wd_val);
}
/*
@@ -311,7 +440,7 @@ static int pcf2127_wdt_stop(struct watchdog_device *wdd)
{
struct pcf2127 *pcf2127 = watchdog_get_drvdata(wdd);
- return regmap_write(pcf2127->regmap, PCF2127_REG_WD_VAL,
+ return regmap_write(pcf2127->regmap, pcf2127->cfg->reg_wd_val,
PCF2127_WD_VAL_STOP);
}
@@ -339,9 +468,25 @@ static const struct watchdog_ops pcf2127_watchdog_ops = {
.set_timeout = pcf2127_wdt_set_timeout,
};
+/*
+ * Compute watchdog period, t, in seconds, from the WATCHDG_TIM_VAL register
+ * value, n, and the clock frequency, f1000, in Hz x 1000.
+ *
+ * The PCF2127/29 datasheet gives t as:
+ * t = n / f
+ * The PCF2131 datasheet gives t as:
+ * t = (n - 1) / f
+ * For both variants, the watchdog is triggered when the WATCHDG_TIM_VAL reaches
+ * the value 1, and not zero. Consequently, the equation from the PCF2131
+ * datasheet seems to be the correct one for both variants.
+ */
+static int pcf2127_watchdog_get_period(int n, int f1000)
+{
+ return (1000 * (n - 1)) / f1000;
+}
+
static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
{
- u32 wdd_timeout;
int ret;
if (!IS_ENABLED(CONFIG_WATCHDOG) ||
@@ -351,21 +496,35 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
pcf2127->wdd.parent = dev;
pcf2127->wdd.info = &pcf2127_wdt_info;
pcf2127->wdd.ops = &pcf2127_watchdog_ops;
- pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN;
- pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
- pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
- pcf2127->wdd.min_hw_heartbeat_ms = 500;
+
+ pcf2127->wdd.min_timeout =
+ pcf2127_watchdog_get_period(
+ 2, pcf2127->cfg->wdd_clock_hz_x1000);
+ pcf2127->wdd.max_timeout =
+ pcf2127_watchdog_get_period(
+ 255, pcf2127->cfg->wdd_clock_hz_x1000);
+ pcf2127->wdd.timeout = PCF2127_WD_DEFAULT_TIMEOUT_S;
+
+ dev_dbg(dev, "%s clock = %d Hz / 1000\n", __func__,
+ pcf2127->cfg->wdd_clock_hz_x1000);
+
+ pcf2127->wdd.min_hw_heartbeat_ms = pcf2127->cfg->wdd_min_hw_heartbeat_ms;
pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
/* Test if watchdog timer is started by bootloader */
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
- if (ret)
- return ret;
+ if (pcf2127->cfg->wd_val_reg_readable) {
+ u32 wdd_timeout;
- if (wdd_timeout)
- set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
+ ret = regmap_read(pcf2127->regmap, pcf2127->cfg->reg_wd_val,
+ &wdd_timeout);
+ if (ret)
+ return ret;
+
+ if (wdd_timeout)
+ set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
+ }
return devm_watchdog_register_device(dev, &pcf2127->wdd);
}
@@ -386,8 +545,8 @@ static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret)
return ret;
- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf,
- sizeof(buf));
+ ret = regmap_bulk_read(pcf2127->regmap, pcf2127->cfg->regs_alarm_base,
+ buf, sizeof(buf));
if (ret)
return ret;
@@ -437,8 +596,8 @@ static int pcf2127_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
buf[3] = bin2bcd(alrm->time.tm_mday);
buf[4] = PCF2127_BIT_ALARM_AE; /* Do not match on week day */
- ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf,
- sizeof(buf));
+ ret = regmap_bulk_write(pcf2127->regmap, pcf2127->cfg->regs_alarm_base,
+ buf, sizeof(buf));
if (ret)
return ret;
@@ -446,38 +605,35 @@ static int pcf2127_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
/*
- * This function reads ctrl2 register, caller is responsible for calling
- * pcf2127_wdt_active_ping()
+ * This function reads one timestamp function data, caller is responsible for
+ * calling pcf2127_wdt_active_ping()
*/
-static int pcf2127_rtc_ts_read(struct device *dev, time64_t *ts)
+static int pcf2127_rtc_ts_read(struct device *dev, time64_t *ts,
+ int ts_id)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
struct rtc_time tm;
int ret;
- unsigned char data[25];
+ unsigned char data[7];
- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL1, data,
- sizeof(data));
+ ret = regmap_bulk_read(pcf2127->regmap, pcf2127->cfg->ts[ts_id].reg_base,
+ data, sizeof(data));
if (ret) {
dev_err(dev, "%s: read error ret=%d\n", __func__, ret);
return ret;
}
dev_dbg(dev,
- "%s: raw data is cr1=%02x, cr2=%02x, cr3=%02x, ts_sc=%02x, ts_mn=%02x, ts_hr=%02x, ts_dm=%02x, ts_mo=%02x, ts_yr=%02x\n",
- __func__, data[PCF2127_REG_CTRL1], data[PCF2127_REG_CTRL2],
- data[PCF2127_REG_CTRL3], data[PCF2127_REG_TS_SC],
- data[PCF2127_REG_TS_MN], data[PCF2127_REG_TS_HR],
- data[PCF2127_REG_TS_DM], data[PCF2127_REG_TS_MO],
- data[PCF2127_REG_TS_YR]);
-
- tm.tm_sec = bcd2bin(data[PCF2127_REG_TS_SC] & 0x7F);
- tm.tm_min = bcd2bin(data[PCF2127_REG_TS_MN] & 0x7F);
- tm.tm_hour = bcd2bin(data[PCF2127_REG_TS_HR] & 0x3F);
- tm.tm_mday = bcd2bin(data[PCF2127_REG_TS_DM] & 0x3F);
+ "%s: raw data is ts_sc=%02x, ts_mn=%02x, ts_hr=%02x, ts_dm=%02x, ts_mo=%02x, ts_yr=%02x\n",
+ __func__, data[1], data[2], data[3], data[4], data[5], data[6]);
+
+ tm.tm_sec = bcd2bin(data[1] & 0x7F);
+ tm.tm_min = bcd2bin(data[2] & 0x7F);
+ tm.tm_hour = bcd2bin(data[3] & 0x3F);
+ tm.tm_mday = bcd2bin(data[4] & 0x3F);
/* TS_MO register (month) value range: 1-12 */
- tm.tm_mon = bcd2bin(data[PCF2127_REG_TS_MO] & 0x1F) - 1;
- tm.tm_year = bcd2bin(data[PCF2127_REG_TS_YR]);
+ tm.tm_mon = bcd2bin(data[5] & 0x1F) - 1;
+ tm.tm_year = bcd2bin(data[6]);
if (tm.tm_year < 70)
tm.tm_year += 100; /* assume we are in 1970...2069 */
@@ -491,47 +647,84 @@ static int pcf2127_rtc_ts_read(struct device *dev, time64_t *ts)
return 0;
};
-static void pcf2127_rtc_ts_snapshot(struct device *dev)
+static void pcf2127_rtc_ts_snapshot(struct device *dev, int ts_id)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
int ret;
+ if (ts_id >= pcf2127->cfg->ts_count)
+ return;
+
/* Let userspace read the first timestamp */
- if (pcf2127->ts_valid)
+ if (pcf2127->ts_valid[ts_id])
return;
- ret = pcf2127_rtc_ts_read(dev, &pcf2127->ts);
+ ret = pcf2127_rtc_ts_read(dev, &pcf2127->ts[ts_id], ts_id);
if (!ret)
- pcf2127->ts_valid = true;
+ pcf2127->ts_valid[ts_id] = true;
}
static irqreturn_t pcf2127_rtc_irq(int irq, void *dev)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- unsigned int ctrl1, ctrl2;
+ unsigned int ctrl2;
int ret = 0;
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1, &ctrl1);
- if (ret)
- return IRQ_NONE;
-
ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
if (ret)
return IRQ_NONE;
- if (!(ctrl1 & PCF2127_CTRL1_IRQ_MASK || ctrl2 & PCF2127_CTRL2_IRQ_MASK))
- return IRQ_NONE;
+ if (pcf2127->cfg->ts_count == 1) {
+ /* PCF2127/29 */
+ unsigned int ctrl1;
+
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1, &ctrl1);
+ if (ret)
+ return IRQ_NONE;
+
+ if (!(ctrl1 & PCF2127_CTRL1_IRQ_MASK || ctrl2 & PCF2127_CTRL2_IRQ_MASK))
+ return IRQ_NONE;
+
+ if (ctrl1 & PCF2127_BIT_CTRL1_TSF1 || ctrl2 & PCF2127_BIT_CTRL2_TSF2)
+ pcf2127_rtc_ts_snapshot(dev, 0);
+
+ if (ctrl1 & PCF2127_CTRL1_IRQ_MASK)
+ regmap_write(pcf2127->regmap, PCF2127_REG_CTRL1,
+ ctrl1 & ~PCF2127_CTRL1_IRQ_MASK);
+
+ if (ctrl2 & PCF2127_CTRL2_IRQ_MASK)
+ regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
+ ctrl2 & ~PCF2127_CTRL2_IRQ_MASK);
+ } else {
+ /* PCF2131. */
+ unsigned int ctrl4;
+
+ ret = regmap_read(pcf2127->regmap, PCF2131_REG_CTRL4, &ctrl4);
+ if (ret)
+ return IRQ_NONE;
+
+ if (!(ctrl4 & PCF2131_CTRL4_IRQ_MASK || ctrl2 & PCF2131_CTRL2_IRQ_MASK))
+ return IRQ_NONE;
- if (ctrl1 & PCF2127_BIT_CTRL1_TSF1 || ctrl2 & PCF2127_BIT_CTRL2_TSF2)
- pcf2127_rtc_ts_snapshot(dev);
+ if (ctrl4 & PCF2131_CTRL4_IRQ_MASK) {
+ int i;
+ int tsf_bit = PCF2131_BIT_CTRL4_TSF1; /* Start at bit 7. */
- if (ctrl1 & PCF2127_CTRL1_IRQ_MASK)
- regmap_write(pcf2127->regmap, PCF2127_REG_CTRL1,
- ctrl1 & ~PCF2127_CTRL1_IRQ_MASK);
+ for (i = 0; i < pcf2127->cfg->ts_count; i++) {
+ if (ctrl4 & tsf_bit)
+ pcf2127_rtc_ts_snapshot(dev, i);
- if (ctrl2 & PCF2127_CTRL2_IRQ_MASK)
- regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
- ctrl2 & ~PCF2127_CTRL2_IRQ_MASK);
+ tsf_bit = tsf_bit >> 1;
+ }
+
+ regmap_write(pcf2127->regmap, PCF2131_REG_CTRL4,
+ ctrl4 & ~PCF2131_CTRL4_IRQ_MASK);
+ }
+
+ if (ctrl2 & PCF2131_CTRL2_IRQ_MASK)
+ regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
+ ctrl2 & ~PCF2131_CTRL2_IRQ_MASK);
+ }
if (ctrl2 & PCF2127_BIT_CTRL2_AF)
rtc_update_irq(pcf2127->rtc, 1, RTC_IRQF | RTC_AF);
@@ -552,28 +745,41 @@ static const struct rtc_class_ops pcf2127_rtc_ops = {
/* sysfs interface */
-static ssize_t timestamp0_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t timestamp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count, int ts_id)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev->parent);
int ret;
+ if (ts_id >= pcf2127->cfg->ts_count)
+ return 0;
+
if (pcf2127->irq_enabled) {
- pcf2127->ts_valid = false;
+ pcf2127->ts_valid[ts_id] = false;
} else {
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
- PCF2127_BIT_CTRL1_TSF1, 0);
+ /* Always clear GND interrupt bit. */
+ ret = regmap_update_bits(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].gnd_detect_reg,
+ pcf2127->cfg->ts[ts_id].gnd_detect_bit,
+ 0);
+
if (ret) {
- dev_err(dev, "%s: update ctrl1 ret=%d\n", __func__, ret);
+ dev_err(dev, "%s: update TS gnd detect ret=%d\n", __func__, ret);
return ret;
}
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
- PCF2127_BIT_CTRL2_TSF2, 0);
- if (ret) {
- dev_err(dev, "%s: update ctrl2 ret=%d\n", __func__, ret);
- return ret;
+ if (pcf2127->cfg->ts[ts_id].inter_detect_bit) {
+ /* Clear intermediate level interrupt bit if supported. */
+ ret = regmap_update_bits(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].inter_detect_reg,
+ pcf2127->cfg->ts[ts_id].inter_detect_bit,
+ 0);
+ if (ret) {
+ dev_err(dev, "%s: update TS intermediate level detect ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
}
ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
@@ -582,34 +788,84 @@ static ssize_t timestamp0_store(struct device *dev,
}
return count;
+}
+
+static ssize_t timestamp0_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return timestamp_store(dev, attr, buf, count, 0);
};
-static ssize_t timestamp0_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t timestamp1_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return timestamp_store(dev, attr, buf, count, 1);
+};
+
+static ssize_t timestamp2_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return timestamp_store(dev, attr, buf, count, 2);
+};
+
+static ssize_t timestamp3_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return timestamp_store(dev, attr, buf, count, 3);
+};
+
+static ssize_t timestamp_show(struct device *dev,
+ struct device_attribute *attr, char *buf,
+ int ts_id)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev->parent);
- unsigned int ctrl1, ctrl2;
int ret;
time64_t ts;
+ if (ts_id >= pcf2127->cfg->ts_count)
+ return 0;
+
if (pcf2127->irq_enabled) {
- if (!pcf2127->ts_valid)
+ if (!pcf2127->ts_valid[ts_id])
return 0;
- ts = pcf2127->ts;
+ ts = pcf2127->ts[ts_id];
} else {
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1, &ctrl1);
- if (ret)
- return 0;
+ u8 valid_low = 0;
+ u8 valid_inter = 0;
+ unsigned int ctrl;
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
+ /* Check if TS input pin is driven to GND, supported by all
+ * variants.
+ */
+ ret = regmap_read(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].gnd_detect_reg,
+ &ctrl);
if (ret)
return 0;
- if (!(ctrl1 & PCF2127_BIT_CTRL1_TSF1) &&
- !(ctrl2 & PCF2127_BIT_CTRL2_TSF2))
+ valid_low = ctrl & pcf2127->cfg->ts[ts_id].gnd_detect_bit;
+
+ if (pcf2127->cfg->ts[ts_id].inter_detect_bit) {
+ /* Check if TS input pin is driven to intermediate level
+ * between GND and supply, if supported by variant.
+ */
+ ret = regmap_read(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].inter_detect_reg,
+ &ctrl);
+ if (ret)
+ return 0;
+
+ valid_inter = ctrl & pcf2127->cfg->ts[ts_id].inter_detect_bit;
+ }
+
+ if (!valid_low && !valid_inter)
return 0;
- ret = pcf2127_rtc_ts_read(dev->parent, &ts);
+ ret = pcf2127_rtc_ts_read(dev->parent, &ts, ts_id);
if (ret)
return 0;
@@ -618,21 +874,227 @@ static ssize_t timestamp0_show(struct device *dev,
return ret;
}
return sprintf(buf, "%llu\n", (unsigned long long)ts);
+}
+
+static ssize_t timestamp0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return timestamp_show(dev, attr, buf, 0);
+};
+
+static ssize_t timestamp1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return timestamp_show(dev, attr, buf, 1);
+};
+
+static ssize_t timestamp2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return timestamp_show(dev, attr, buf, 2);
+};
+
+static ssize_t timestamp3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return timestamp_show(dev, attr, buf, 3);
};
static DEVICE_ATTR_RW(timestamp0);
+static DEVICE_ATTR_RW(timestamp1);
+static DEVICE_ATTR_RW(timestamp2);
+static DEVICE_ATTR_RW(timestamp3);
static struct attribute *pcf2127_attrs[] = {
&dev_attr_timestamp0.attr,
NULL
};
-static const struct attribute_group pcf2127_attr_group = {
- .attrs = pcf2127_attrs,
+static struct attribute *pcf2131_attrs[] = {
+ &dev_attr_timestamp0.attr,
+ &dev_attr_timestamp1.attr,
+ &dev_attr_timestamp2.attr,
+ &dev_attr_timestamp3.attr,
+ NULL
};
+static struct pcf21xx_config pcf21xx_cfg[] = {
+ [PCF2127] = {
+ .type = PCF2127,
+ .max_register = 0x1d,
+ .has_nvmem = 1,
+ .has_bit_wd_ctl_cd0 = 1,
+ .wd_val_reg_readable = 1,
+ .has_int_a_b = 0,
+ .reg_time_base = PCF2127_REG_TIME_BASE,
+ .regs_alarm_base = PCF2127_REG_ALARM_BASE,
+ .reg_wd_ctl = PCF2127_REG_WD_CTL,
+ .reg_wd_val = PCF2127_REG_WD_VAL,
+ .reg_clkout = PCF2127_REG_CLKOUT,
+ .wdd_clock_hz_x1000 = PCF2127_WD_CLOCK_HZ_X1000,
+ .wdd_min_hw_heartbeat_ms = PCF2127_WD_MIN_HW_HEARTBEAT_MS,
+ .ts_count = 1,
+ .ts[0] = {
+ .reg_base = PCF2127_REG_TS1_BASE,
+ .gnd_detect_reg = PCF2127_REG_CTRL1,
+ .gnd_detect_bit = PCF2127_BIT_CTRL1_TSF1,
+ .inter_detect_reg = PCF2127_REG_CTRL2,
+ .inter_detect_bit = PCF2127_BIT_CTRL2_TSF2,
+ .ie_reg = PCF2127_REG_CTRL2,
+ .ie_bit = PCF2127_BIT_CTRL2_TSIE,
+ },
+ .attribute_group = {
+ .attrs = pcf2127_attrs,
+ },
+ },
+ [PCF2129] = {
+ .type = PCF2129,
+ .max_register = 0x19,
+ .has_nvmem = 0,
+ .has_bit_wd_ctl_cd0 = 0,
+ .wd_val_reg_readable = 1,
+ .has_int_a_b = 0,
+ .reg_time_base = PCF2127_REG_TIME_BASE,
+ .regs_alarm_base = PCF2127_REG_ALARM_BASE,
+ .reg_wd_ctl = PCF2127_REG_WD_CTL,
+ .reg_wd_val = PCF2127_REG_WD_VAL,
+ .reg_clkout = PCF2127_REG_CLKOUT,
+ .wdd_clock_hz_x1000 = PCF2127_WD_CLOCK_HZ_X1000,
+ .wdd_min_hw_heartbeat_ms = PCF2127_WD_MIN_HW_HEARTBEAT_MS,
+ .ts_count = 1,
+ .ts[0] = {
+ .reg_base = PCF2127_REG_TS1_BASE,
+ .gnd_detect_reg = PCF2127_REG_CTRL1,
+ .gnd_detect_bit = PCF2127_BIT_CTRL1_TSF1,
+ .inter_detect_reg = PCF2127_REG_CTRL2,
+ .inter_detect_bit = PCF2127_BIT_CTRL2_TSF2,
+ .ie_reg = PCF2127_REG_CTRL2,
+ .ie_bit = PCF2127_BIT_CTRL2_TSIE,
+ },
+ .attribute_group = {
+ .attrs = pcf2127_attrs,
+ },
+ },
+ [PCF2131] = {
+ .type = PCF2131,
+ .max_register = 0x36,
+ .has_nvmem = 0,
+ .has_bit_wd_ctl_cd0 = 0,
+ .wd_val_reg_readable = 0,
+ .has_int_a_b = 1,
+ .reg_time_base = PCF2131_REG_TIME_BASE,
+ .regs_alarm_base = PCF2131_REG_ALARM_BASE,
+ .reg_wd_ctl = PCF2131_REG_WD_CTL,
+ .reg_wd_val = PCF2131_REG_WD_VAL,
+ .reg_clkout = PCF2131_REG_CLKOUT,
+ .wdd_clock_hz_x1000 = PCF2131_WD_CLOCK_HZ_X1000,
+ .wdd_min_hw_heartbeat_ms = PCF2131_WD_MIN_HW_HEARTBEAT_MS,
+ .ts_count = 4,
+ .ts[0] = {
+ .reg_base = PCF2131_REG_TS1_BASE,
+ .gnd_detect_reg = PCF2131_REG_CTRL4,
+ .gnd_detect_bit = PCF2131_BIT_CTRL4_TSF1,
+ .inter_detect_bit = 0,
+ .ie_reg = PCF2131_REG_CTRL5,
+ .ie_bit = PCF2131_BIT_CTRL5_TSIE1,
+ },
+ .ts[1] = {
+ .reg_base = PCF2131_REG_TS2_BASE,
+ .gnd_detect_reg = PCF2131_REG_CTRL4,
+ .gnd_detect_bit = PCF2131_BIT_CTRL4_TSF2,
+ .inter_detect_bit = 0,
+ .ie_reg = PCF2131_REG_CTRL5,
+ .ie_bit = PCF2131_BIT_CTRL5_TSIE2,
+ },
+ .ts[2] = {
+ .reg_base = PCF2131_REG_TS3_BASE,
+ .gnd_detect_reg = PCF2131_REG_CTRL4,
+ .gnd_detect_bit = PCF2131_BIT_CTRL4_TSF3,
+ .inter_detect_bit = 0,
+ .ie_reg = PCF2131_REG_CTRL5,
+ .ie_bit = PCF2131_BIT_CTRL5_TSIE3,
+ },
+ .ts[3] = {
+ .reg_base = PCF2131_REG_TS4_BASE,
+ .gnd_detect_reg = PCF2131_REG_CTRL4,
+ .gnd_detect_bit = PCF2131_BIT_CTRL4_TSF4,
+ .inter_detect_bit = 0,
+ .ie_reg = PCF2131_REG_CTRL5,
+ .ie_bit = PCF2131_BIT_CTRL5_TSIE4,
+ },
+ .attribute_group = {
+ .attrs = pcf2131_attrs,
+ },
+ },
+};
+
+/*
+ * Enable timestamp function and corresponding interrupt(s).
+ */
+static int pcf2127_enable_ts(struct device *dev, int ts_id)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ int ret;
+
+ if (ts_id >= pcf2127->cfg->ts_count) {
+ dev_err(dev, "%s: invalid tamper detection ID (%d)\n",
+ __func__, ts_id);
+ return -EINVAL;
+ }
+
+ /* Enable timestamp function. */
+ ret = regmap_update_bits(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].reg_base,
+ PCF2127_BIT_TS_CTRL_TSOFF |
+ PCF2127_BIT_TS_CTRL_TSM,
+ PCF2127_BIT_TS_CTRL_TSM);
+ if (ret) {
+ dev_err(dev, "%s: tamper detection config (ts%d_ctrl) failed\n",
+ __func__, ts_id);
+ return ret;
+ }
+
+ /*
+ * Enable interrupt generation when TSF timestamp flag is set.
+ * Interrupt signals are open-drain outputs and can be left floating if
+ * unused.
+ */
+ ret = regmap_update_bits(pcf2127->regmap, pcf2127->cfg->ts[ts_id].ie_reg,
+ pcf2127->cfg->ts[ts_id].ie_bit,
+ pcf2127->cfg->ts[ts_id].ie_bit);
+ if (ret) {
+ dev_err(dev, "%s: tamper detection TSIE%d config failed\n",
+ __func__, ts_id);
+ return ret;
+ }
+
+ return ret;
+}
+
+/* Route all interrupt sources to INT A pin. */
+static int pcf2127_configure_interrupt_pins(struct device *dev)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ int ret;
+
+ /* Mask bits need to be cleared to enable corresponding
+ * interrupt source.
+ */
+ ret = regmap_write(pcf2127->regmap,
+ PCF2131_REG_INT_A_MASK1, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(pcf2127->regmap,
+ PCF2131_REG_INT_A_MASK2, 0);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int pcf2127_probe(struct device *dev, struct regmap *regmap,
- int alarm_irq, const char *name, bool is_pcf2127)
+ int alarm_irq, const struct pcf21xx_config *config)
{
struct pcf2127 *pcf2127;
int ret = 0;
@@ -645,6 +1107,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return -ENOMEM;
pcf2127->regmap = regmap;
+ pcf2127->cfg = config;
dev_set_drvdata(dev, pcf2127);
@@ -656,8 +1119,16 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
- set_bit(RTC_FEATURE_ALARM_RES_2S, pcf2127->rtc->features);
- clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf2127->rtc->features);
+
+ /*
+ * PCF2127/29 do not work correctly when setting alarms at 1s intervals.
+ * PCF2131 is ok.
+ */
+ if (pcf2127->cfg->type == PCF2127 || pcf2127->cfg->type == PCF2129) {
+ set_bit(RTC_FEATURE_ALARM_RES_2S, pcf2127->rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf2127->rtc->features);
+ }
+
clear_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
if (alarm_irq > 0) {
@@ -688,7 +1159,16 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
set_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
}
- if (is_pcf2127) {
+ if (pcf2127->cfg->has_int_a_b) {
+ /* Configure int A/B pins, independently of alarm_irq. */
+ ret = pcf2127_configure_interrupt_pins(dev);
+ if (ret) {
+ dev_err(dev, "failed to configure interrupt pins\n");
+ return ret;
+ }
+ }
+
+ if (pcf2127->cfg->has_nvmem) {
struct nvmem_config nvmem_cfg = {
.priv = pcf2127,
.reg_read = pcf2127_nvmem_read,
@@ -703,15 +1183,17 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
* The "Power-On Reset Override" facility prevents the RTC to do a reset
* after power on. For normal operation the PORO must be disabled.
*/
- regmap_clear_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
+ ret = regmap_clear_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
PCF2127_BIT_CTRL1_POR_OVRD);
+ if (ret < 0)
+ return ret;
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CLKOUT, &val);
+ ret = regmap_read(pcf2127->regmap, pcf2127->cfg->reg_clkout, &val);
if (ret < 0)
return ret;
if (!(val & PCF2127_BIT_CLKOUT_OTPR)) {
- ret = regmap_set_bits(pcf2127->regmap, PCF2127_REG_CLKOUT,
+ ret = regmap_set_bits(pcf2127->regmap, pcf2127->cfg->reg_clkout,
PCF2127_BIT_CLKOUT_OTPR);
if (ret < 0)
return ret;
@@ -721,20 +1203,20 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
/*
* Watchdog timer enabled and reset pin /RST activated when timed out.
- * Select 1Hz clock source for watchdog timer.
+ * Select 1Hz clock source for watchdog timer (1/4Hz for PCF2131).
* Note: Countdown timer disabled and not available.
- * For pca2129, pcf2129, only bit[7] is for Symbol WD_CD
+ * For pca2129, pcf2129 and pcf2131, only bit[7] is for Symbol WD_CD
* of register watchdg_tim_ctl. The bit[6] is labeled
* as T. Bits labeled as T must always be written with
* logic 0.
*/
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_WD_CTL,
+ ret = regmap_update_bits(pcf2127->regmap, pcf2127->cfg->reg_wd_ctl,
PCF2127_BIT_WD_CTL_CD1 |
PCF2127_BIT_WD_CTL_CD0 |
PCF2127_BIT_WD_CTL_TF1 |
PCF2127_BIT_WD_CTL_TF0,
PCF2127_BIT_WD_CTL_CD1 |
- (is_pcf2127 ? PCF2127_BIT_WD_CTL_CD0 : 0) |
+ (pcf2127->cfg->has_bit_wd_ctl_cd0 ? PCF2127_BIT_WD_CTL_CD0 : 0) |
PCF2127_BIT_WD_CTL_TF1);
if (ret) {
dev_err(dev, "%s: watchdog config (wd_ctl) failed\n", __func__);
@@ -760,34 +1242,15 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
}
/*
- * Enable timestamp function and store timestamp of first trigger
- * event until TSF1 and TSF2 interrupt flags are cleared.
- */
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_TS_CTRL,
- PCF2127_BIT_TS_CTRL_TSOFF |
- PCF2127_BIT_TS_CTRL_TSM,
- PCF2127_BIT_TS_CTRL_TSM);
- if (ret) {
- dev_err(dev, "%s: tamper detection config (ts_ctrl) failed\n",
- __func__);
- return ret;
- }
-
- /*
- * Enable interrupt generation when TSF1 or TSF2 timestamp flags
- * are set. Interrupt signal is an open-drain output and can be
- * left floating if unused.
+ * Enable timestamp functions 1 to 4.
*/
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
- PCF2127_BIT_CTRL2_TSIE,
- PCF2127_BIT_CTRL2_TSIE);
- if (ret) {
- dev_err(dev, "%s: tamper detection config (ctrl2) failed\n",
- __func__);
- return ret;
+ for (int i = 0; i < pcf2127->cfg->ts_count; i++) {
+ ret = pcf2127_enable_ts(dev, i);
+ if (ret)
+ return ret;
}
- ret = rtc_add_group(pcf2127->rtc, &pcf2127_attr_group);
+ ret = rtc_add_group(pcf2127->rtc, &pcf2127->cfg->attribute_group);
if (ret) {
dev_err(dev, "%s: tamper sysfs registering failed\n",
__func__);
@@ -799,9 +1262,10 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
#ifdef CONFIG_OF
static const struct of_device_id pcf2127_of_match[] = {
- { .compatible = "nxp,pcf2127" },
- { .compatible = "nxp,pcf2129" },
- { .compatible = "nxp,pca2129" },
+ { .compatible = "nxp,pcf2127", .data = &pcf21xx_cfg[PCF2127] },
+ { .compatible = "nxp,pcf2129", .data = &pcf21xx_cfg[PCF2129] },
+ { .compatible = "nxp,pca2129", .data = &pcf21xx_cfg[PCF2129] },
+ { .compatible = "nxp,pcf2131", .data = &pcf21xx_cfg[PCF2131] },
{}
};
MODULE_DEVICE_TABLE(of, pcf2127_of_match);
@@ -886,26 +1350,41 @@ static const struct regmap_bus pcf2127_i2c_regmap = {
static struct i2c_driver pcf2127_i2c_driver;
static const struct i2c_device_id pcf2127_i2c_id[] = {
- { "pcf2127", 1 },
- { "pcf2129", 0 },
- { "pca2129", 0 },
+ { "pcf2127", PCF2127 },
+ { "pcf2129", PCF2129 },
+ { "pca2129", PCF2129 },
+ { "pcf2131", PCF2131 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
static int pcf2127_i2c_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_match_id(pcf2127_i2c_id, client);
struct regmap *regmap;
- static const struct regmap_config config = {
+ static struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0x1d,
};
+ const struct pcf21xx_config *variant;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
+ if (client->dev.of_node) {
+ variant = of_device_get_match_data(&client->dev);
+ if (!variant)
+ return -ENODEV;
+ } else {
+ enum pcf21xx_type type =
+ i2c_match_id(pcf2127_i2c_id, client)->driver_data;
+
+ if (type >= PCF21XX_LAST_ID)
+ return -ENODEV;
+ variant = &pcf21xx_cfg[type];
+ }
+
+	config.max_register = variant->max_register;
+
regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap,
&client->dev, &config);
if (IS_ERR(regmap)) {
@@ -914,8 +1393,7 @@ static int pcf2127_i2c_probe(struct i2c_client *client)
return PTR_ERR(regmap);
}
- return pcf2127_probe(&client->dev, regmap, client->irq,
- pcf2127_i2c_driver.driver.name, id->driver_data);
+ return pcf2127_probe(&client->dev, regmap, client->irq, variant);
}
static struct i2c_driver pcf2127_i2c_driver = {
@@ -953,17 +1431,32 @@ static void pcf2127_i2c_unregister_driver(void)
#if IS_ENABLED(CONFIG_SPI_MASTER)
static struct spi_driver pcf2127_spi_driver;
+static const struct spi_device_id pcf2127_spi_id[];
static int pcf2127_spi_probe(struct spi_device *spi)
{
- static const struct regmap_config config = {
+ static struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
.read_flag_mask = 0xa0,
.write_flag_mask = 0x20,
- .max_register = 0x1d,
};
struct regmap *regmap;
+ const struct pcf21xx_config *variant;
+
+ if (spi->dev.of_node) {
+ variant = of_device_get_match_data(&spi->dev);
+ if (!variant)
+ return -ENODEV;
+ } else {
+ enum pcf21xx_type type = spi_get_device_id(spi)->driver_data;
+
+ if (type >= PCF21XX_LAST_ID)
+ return -ENODEV;
+ variant = &pcf21xx_cfg[type];
+ }
+
+	config.max_register = variant->max_register;
regmap = devm_regmap_init_spi(spi, &config);
if (IS_ERR(regmap)) {
@@ -972,15 +1465,14 @@ static int pcf2127_spi_probe(struct spi_device *spi)
return PTR_ERR(regmap);
}
- return pcf2127_probe(&spi->dev, regmap, spi->irq,
- pcf2127_spi_driver.driver.name,
- spi_get_device_id(spi)->driver_data);
+ return pcf2127_probe(&spi->dev, regmap, spi->irq, variant);
}
static const struct spi_device_id pcf2127_spi_id[] = {
- { "pcf2127", 1 },
- { "pcf2129", 0 },
- { "pca2129", 0 },
+ { "pcf2127", PCF2127 },
+ { "pcf2129", PCF2129 },
+ { "pca2129", PCF2129 },
+ { "pcf2131", PCF2131 },
{ }
};
MODULE_DEVICE_TABLE(spi, pcf2127_spi_id);
@@ -1045,5 +1537,5 @@ static void __exit pcf2127_exit(void)
module_exit(pcf2127_exit)
MODULE_AUTHOR("Renaud Cerrato <r.cerrato@til-technologies.fr>");
-MODULE_DESCRIPTION("NXP PCF2127/29 RTC driver");
+MODULE_DESCRIPTION("NXP PCF2127/29/31 RTC driver");
MODULE_LICENSE("GPL v2");
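
Editor's note: the hunks above add per-variant tamper/timestamp channels, exposed to user space as sysfs attributes (timestamp0 on PCF2127/29, timestamp0..timestamp3 on PCF2131). The following is a minimal user-space sketch, not part of the patch, that reads those attributes back; the "/sys/class/rtc/rtc0" path is an assumption and depends on how the RTC is registered on a given system.

/*
 * Hedged user-space sketch: read the tamper timestamp attributes that
 * the patch above registers (timestamp1..3 exist only on PCF2131).
 * The sysfs path is an assumption; adjust "rtc0" for your system.
 */
#include <stdio.h>

int main(void)
{
	char path[64];
	int i;

	for (i = 0; i < 4; i++) {
		FILE *f;
		unsigned long long ts;

		snprintf(path, sizeof(path),
			 "/sys/class/rtc/rtc0/timestamp%d", i);
		f = fopen(path, "r");
		if (!f)
			continue;	/* attribute absent on PCF2127/29 for i > 0 */
		if (fscanf(f, "%llu", &ts) == 1)
			printf("timestamp%d: %llu s since epoch\n", i, ts);
		else
			printf("timestamp%d: no event latched\n", i);
		fclose(f);
	}
	return 0;
}

An empty read matches the driver behaviour above: timestamp_show() returns 0 bytes when no tamper event has been latched for that channel.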
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index e517abfaee2a..fdbc07f14036 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -14,7 +14,7 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
@@ -514,49 +514,40 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
}
#endif
-enum pcf85063_type {
- PCF85063,
- PCF85063TP,
- PCF85063A,
- RV8263,
- PCF85063_LAST_ID
+static const struct pcf85063_config config_pcf85063 = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
+ },
};
-static struct pcf85063_config pcf85063_cfg[] = {
- [PCF85063] = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x0a,
- },
- },
- [PCF85063TP] = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x0a,
- },
- },
- [PCF85063A] = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
+static const struct pcf85063_config config_pcf85063tp = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
},
- [RV8263] = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
- .force_cap_7000 = 1,
+};
+
+static const struct pcf85063_config config_pcf85063a = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
},
+ .has_alarms = 1,
};
-static const struct i2c_device_id pcf85063_ids[];
+static const struct pcf85063_config config_rv8263 = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+ .force_cap_7000 = 1,
+};
static int pcf85063_probe(struct i2c_client *client)
{
@@ -579,17 +570,9 @@ static int pcf85063_probe(struct i2c_client *client)
if (!pcf85063)
return -ENOMEM;
- if (client->dev.of_node) {
- config = of_device_get_match_data(&client->dev);
- if (!config)
- return -ENODEV;
- } else {
- enum pcf85063_type type =
- i2c_match_id(pcf85063_ids, client)->driver_data;
- if (type >= PCF85063_LAST_ID)
- return -ENODEV;
- config = &pcf85063_cfg[type];
- }
+ config = i2c_get_match_data(client);
+ if (!config)
+ return -ENODEV;
pcf85063->regmap = devm_regmap_init_i2c(client, &config->regmap);
if (IS_ERR(pcf85063->regmap))
@@ -655,22 +638,22 @@ static int pcf85063_probe(struct i2c_client *client)
}
static const struct i2c_device_id pcf85063_ids[] = {
- { "pca85073a", PCF85063A },
- { "pcf85063", PCF85063 },
- { "pcf85063tp", PCF85063TP },
- { "pcf85063a", PCF85063A },
- { "rv8263", RV8263 },
+ { "pca85073a", .driver_data = (kernel_ulong_t)&config_pcf85063a },
+ { "pcf85063", .driver_data = (kernel_ulong_t)&config_pcf85063 },
+ { "pcf85063tp", .driver_data = (kernel_ulong_t)&config_pcf85063tp },
+ { "pcf85063a", .driver_data = (kernel_ulong_t)&config_pcf85063a },
+ { "rv8263", .driver_data = (kernel_ulong_t)&config_rv8263 },
{}
};
MODULE_DEVICE_TABLE(i2c, pcf85063_ids);
#ifdef CONFIG_OF
static const struct of_device_id pcf85063_of_match[] = {
- { .compatible = "nxp,pca85073a", .data = &pcf85063_cfg[PCF85063A] },
- { .compatible = "nxp,pcf85063", .data = &pcf85063_cfg[PCF85063] },
- { .compatible = "nxp,pcf85063tp", .data = &pcf85063_cfg[PCF85063TP] },
- { .compatible = "nxp,pcf85063a", .data = &pcf85063_cfg[PCF85063A] },
- { .compatible = "microcrystal,rv8263", .data = &pcf85063_cfg[RV8263] },
+ { .compatible = "nxp,pca85073a", .data = &config_pcf85063a },
+ { .compatible = "nxp,pcf85063", .data = &config_pcf85063 },
+ { .compatible = "nxp,pcf85063tp", .data = &config_pcf85063tp },
+ { .compatible = "nxp,pcf85063a", .data = &config_pcf85063a },
+ { .compatible = "microcrystal,rv8263", .data = &config_rv8263 },
{}
};
MODULE_DEVICE_TABLE(of, pcf85063_of_match);
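
Editor's note: the pcf85063 probe above is simplified by i2c_get_match_data(), which returns the OF/ACPI match data when the device came from firmware and otherwise falls back to the i2c_device_id driver_data; that is why the id table entries now carry config pointers cast to kernel_ulong_t. A hedged kernel-style sketch of the same pattern follows; the "foo" names are hypothetical and not part of this patch.

/*
 * Hedged sketch of the lookup pattern used above: both match tables
 * point at the same per-variant config, and i2c_get_match_data()
 * picks whichever one applies.  All "foo" identifiers are invented.
 */
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>

struct foo_config {
	unsigned int max_register;
};

static const struct foo_config foo_cfg_a = { .max_register = 0x0a };

static const struct i2c_device_id foo_i2c_ids[] = {
	{ "foo-a", .driver_data = (kernel_ulong_t)&foo_cfg_a },
	{ }
};

static const struct of_device_id foo_of_ids[] = {
	{ .compatible = "vendor,foo-a", .data = &foo_cfg_a },
	{ }
};

static int foo_probe(struct i2c_client *client)
{
	const struct foo_config *cfg = i2c_get_match_data(client);

	if (!cfg)
		return -ENODEV;
	/* use cfg->max_register to size the regmap, as in the patch */
	return 0;
}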
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index 65b8b1338dbb..06194674d71c 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -15,7 +15,6 @@
#include <linux/errno.h>
#include <linux/bcd.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
/*
@@ -403,6 +402,7 @@ static int pcf85363_probe(struct i2c_client *client)
},
};
int ret, i, err;
+ bool wakeup_source;
if (data)
config = data;
@@ -432,25 +432,36 @@ static int pcf85363_probe(struct i2c_client *client)
pcf85363->rtc->ops = &rtc_ops;
pcf85363->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf85363->rtc->range_max = RTC_TIMESTAMP_END_2099;
- clear_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
+
+ wakeup_source = device_property_read_bool(&client->dev,
+ "wakeup-source");
+ if (client->irq > 0 || wakeup_source) {
+ regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
+ regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
+ PIN_IO_INTA_OUT, PIN_IO_INTAPM);
+ }
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
if (dev_fwnode(&client->dev))
irqflags = 0;
-
- regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
- regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
- PIN_IO_INTA_OUT, PIN_IO_INTAPM);
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf85363_rtc_handle_irq,
irqflags | IRQF_ONESHOT,
"pcf85363", client);
- if (ret)
- dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
- else
- set_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
+ if (ret) {
+ dev_warn(&client->dev,
+ "unable to request IRQ, alarms disabled\n");
+ client->irq = 0;
+ }
+ }
+
+ if (client->irq > 0 || wakeup_source) {
+ device_init_wakeup(&client->dev, true);
+ set_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
+ } else {
+ clear_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
}
ret = devm_rtc_register_device(pcf85363->rtc);
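
Editor's note: the pcf85363 change above enables the alarm feature whenever an IRQ or a "wakeup-source" property is present and calls device_init_wakeup(), so the RTC can wake the system. Below is a hedged user-space sketch that arms a wake alarm through the standard RTC character-device ioctls this feature feeds; "/dev/rtc0" is an assumption and the +1 minute arithmetic is deliberately naive.

/*
 * Hedged user-space sketch: arm a wake alarm roughly 60 s ahead via the
 * standard RTC chardev interface.  /dev/rtc0 is an assumption; the node
 * name depends on registration order.
 */
#include <fcntl.h>
#include <linux/rtc.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct rtc_wkalrm alarm = { 0 };
	struct rtc_time now;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &now) < 0) {
		perror("rtc");
		return 1;
	}
	alarm.time = now;
	alarm.time.tm_min += 1;		/* naive +1 min; no rollover handling */
	alarm.enabled = 1;
	if (ioctl(fd, RTC_WKALM_SET, &alarm) < 0)
		perror("RTC_WKALM_SET");
	close(fd);
	return 0;
}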
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index eeacf480cf36..e400c78252e8 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include "rtc-sa1100.h"
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index a5a6c8772ecd..f8fab0205f8c 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -12,7 +12,7 @@
#include <linux/bcd.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
/*
* Ricoh has a family of I2C based RTCs, which differ only slightly from
@@ -826,8 +826,7 @@ static int rs5c372_probe(struct i2c_client *client)
rs5c372->client = client;
i2c_set_clientdata(client, rs5c372);
if (client->dev.of_node) {
- rs5c372->type = (enum rtc_type)
- of_device_get_match_data(&client->dev);
+ rs5c372->type = (uintptr_t)of_device_get_match_data(&client->dev);
} else {
const struct i2c_device_id *id = i2c_match_id(rs5c372_id, client);
rs5c372->type = id->driver_data;
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 076e56f4e01a..2f001c59c61d 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
@@ -855,11 +855,68 @@ static const struct regmap_config regmap_config = {
.max_register = 0x37,
};
+static u8 rv3028_set_trickle_charger(struct rv3028_data *rv3028,
+ struct i2c_client *client)
+{
+ int ret, val_old, val;
+ u32 ohms, chargeable;
+
+ ret = regmap_read(rv3028->regmap, RV3028_BACKUP, &val_old);
+ if (ret < 0)
+ return ret;
+
+ /* mask out only trickle charger bits */
+ val_old = val_old & (RV3028_BACKUP_TCE | RV3028_BACKUP_TCR_MASK);
+ val = val_old;
+
+ /* setup trickle charger */
+ if (!device_property_read_u32(&client->dev, "trickle-resistor-ohms",
+ &ohms)) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rv3028_trickle_resistors); i++)
+ if (ohms == rv3028_trickle_resistors[i])
+ break;
+
+ if (i < ARRAY_SIZE(rv3028_trickle_resistors)) {
+ /* enable trickle charger and its resistor */
+ val = RV3028_BACKUP_TCE | i;
+ } else {
+ dev_warn(&client->dev, "invalid trickle resistor value\n");
+ }
+ }
+
+ if (!device_property_read_u32(&client->dev, "aux-voltage-chargeable",
+ &chargeable)) {
+ switch (chargeable) {
+ case 0:
+ val &= ~RV3028_BACKUP_TCE;
+ break;
+ case 1:
+ val |= RV3028_BACKUP_TCE;
+ break;
+ default:
+ dev_warn(&client->dev,
+ "unsupported aux-voltage-chargeable value\n");
+ break;
+ }
+ }
+
+ /* only update EEPROM if changes are necessary */
+ if (val_old != val) {
+ ret = rv3028_update_cfg(rv3028, RV3028_BACKUP, RV3028_BACKUP_TCE |
+ RV3028_BACKUP_TCR_MASK, val);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int rv3028_probe(struct i2c_client *client)
{
struct rv3028_data *rv3028;
int ret, status;
- u32 ohms;
struct nvmem_config nvmem_cfg = {
.name = "rv3028_nvram",
.word_size = 1,
@@ -937,24 +994,9 @@ static int rv3028_probe(struct i2c_client *client)
if (ret)
return ret;
- /* setup trickle charger */
- if (!device_property_read_u32(&client->dev, "trickle-resistor-ohms",
- &ohms)) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(rv3028_trickle_resistors); i++)
- if (ohms == rv3028_trickle_resistors[i])
- break;
-
- if (i < ARRAY_SIZE(rv3028_trickle_resistors)) {
- ret = rv3028_update_cfg(rv3028, RV3028_BACKUP, RV3028_BACKUP_TCE |
- RV3028_BACKUP_TCR_MASK, RV3028_BACKUP_TCE | i);
- if (ret)
- return ret;
- } else {
- dev_warn(&client->dev, "invalid trickle resistor value\n");
- }
- }
+ ret = rv3028_set_trickle_charger(rv3028, client);
+ if (ret)
+ return ret;
ret = rtc_add_group(rv3028->rtc, &rv3028_attr_group);
if (ret)
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index 6b8eb2039a33..35b2e36b426a 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -19,7 +19,7 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 98679cae13e8..1a3ec1bb5b81 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -15,7 +15,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/rtc.h>
#define RV8803_I2C_TRY_COUNT 4
@@ -645,8 +645,7 @@ static int rv8803_probe(struct i2c_client *client)
mutex_init(&rv8803->flags_lock);
rv8803->client = client;
if (client->dev.of_node) {
- rv8803->type = (enum rv8803_type)
- of_device_get_match_data(&client->dev);
+ rv8803->type = (uintptr_t)of_device_get_match_data(&client->dev);
} else {
const struct i2c_device_id *id = i2c_match_id(rv8803_id, client);
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 8702db6096ba..834274db8c3f 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 82881fd2e14a..48efd61a114d 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -13,7 +13,6 @@
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/log2.h>
diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
index dca736caba85..56ebbd4d0481 100644
--- a/drivers/rtc/rtc-rzn1.c
+++ b/drivers/rtc/rtc-rzn1.c
@@ -15,7 +15,7 @@
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtc.h>
@@ -227,7 +227,7 @@ static int rzn1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
/* We cannot set alarms more than one week ahead */
- farest = rtc_tm_to_time64(&tm_now) + (7 * 86400);
+ farest = rtc_tm_to_time64(&tm_now) + rtc->rtcdev->alarm_offset_max;
alarm = rtc_tm_to_time64(tm);
if (time_after(alarm, farest))
return -ERANGE;
@@ -351,6 +351,7 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
rtc->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->rtcdev->alarm_offset_max = 7 * 86400;
rtc->rtcdev->ops = &rzn1_rtc_ops;
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 70e1a18e5efd..282238818f63 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -23,7 +23,6 @@
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/uaccess.h>
#include <linux/io.h>
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 3d36e11cff80..76753c71d92e 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -6,11 +6,13 @@
#include <linux/bcd.h>
#include <linux/clk.h>
+#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
@@ -89,6 +91,9 @@
/* Max STM32 RTC register offset is 0x3FC */
#define UNDEF_REG 0xFFFF
+/* STM32 RTC driver time helpers */
+#define SEC_PER_DAY (24 * 60 * 60)
+
struct stm32_rtc;
struct stm32_rtc_registers {
@@ -114,6 +119,7 @@ struct stm32_rtc_data {
void (*clear_events)(struct stm32_rtc *rtc, unsigned int flags);
bool has_pclk;
bool need_dbp;
+ bool need_accuracy;
};
struct stm32_rtc {
@@ -158,10 +164,9 @@ static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
* slowest rtc_ck frequency may be 32kHz and highest should be
* 1MHz, we poll every 10 us with a timeout of 100ms.
*/
- return readl_relaxed_poll_timeout_atomic(
- rtc->base + regs->isr,
- isr, (isr & STM32_RTC_ISR_INITF),
- 10, 100000);
+ return readl_relaxed_poll_timeout_atomic(rtc->base + regs->isr, isr,
+ (isr & STM32_RTC_ISR_INITF),
+ 10, 100000);
}
return 0;
@@ -425,40 +430,42 @@ static int stm32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int stm32_rtc_valid_alrm(struct stm32_rtc *rtc, struct rtc_time *tm)
+static int stm32_rtc_valid_alrm(struct device *dev, struct rtc_time *tm)
{
- const struct stm32_rtc_registers *regs = &rtc->data->regs;
- int cur_day, cur_mon, cur_year, cur_hour, cur_min, cur_sec;
- unsigned int dr = readl_relaxed(rtc->base + regs->dr);
- unsigned int tr = readl_relaxed(rtc->base + regs->tr);
-
- cur_day = (dr & STM32_RTC_DR_DATE) >> STM32_RTC_DR_DATE_SHIFT;
- cur_mon = (dr & STM32_RTC_DR_MONTH) >> STM32_RTC_DR_MONTH_SHIFT;
- cur_year = (dr & STM32_RTC_DR_YEAR) >> STM32_RTC_DR_YEAR_SHIFT;
- cur_sec = (tr & STM32_RTC_TR_SEC) >> STM32_RTC_TR_SEC_SHIFT;
- cur_min = (tr & STM32_RTC_TR_MIN) >> STM32_RTC_TR_MIN_SHIFT;
- cur_hour = (tr & STM32_RTC_TR_HOUR) >> STM32_RTC_TR_HOUR_SHIFT;
+ static struct rtc_time now;
+ time64_t max_alarm_time64;
+ int max_day_forward;
+ int next_month;
+ int next_year;
/*
* Assuming current date is M-D-Y H:M:S.
* RTC alarm can't be set on a specific month and year.
* So the valid alarm range is:
* M-D-Y H:M:S < alarm <= (M+1)-D-Y H:M:S
- * with a specific case for December...
*/
- if ((((tm->tm_year > cur_year) &&
- (tm->tm_mon == 0x1) && (cur_mon == 0x12)) ||
- ((tm->tm_year == cur_year) &&
- (tm->tm_mon <= cur_mon + 1))) &&
- ((tm->tm_mday > cur_day) ||
- ((tm->tm_mday == cur_day) &&
- ((tm->tm_hour > cur_hour) ||
- ((tm->tm_hour == cur_hour) && (tm->tm_min > cur_min)) ||
- ((tm->tm_hour == cur_hour) && (tm->tm_min == cur_min) &&
- (tm->tm_sec >= cur_sec))))))
- return 0;
+ stm32_rtc_read_time(dev, &now);
+
+ /*
+ * Find the next month and the year of the next month.
+ * Note: tm_mon and next_month are from 0 to 11
+ */
+ next_month = now.tm_mon + 1;
+ if (next_month == 12) {
+ next_month = 0;
+ next_year = now.tm_year + 1;
+ } else {
+ next_year = now.tm_year;
+ }
- return -EINVAL;
+ /* Find the maximum limit of alarm in days. */
+ max_day_forward = rtc_month_days(now.tm_mon, now.tm_year)
+ - now.tm_mday
+ + min(rtc_month_days(next_month, next_year), now.tm_mday);
+
+ /* Convert to timestamp and compare the alarm time and its upper limit */
+ max_alarm_time64 = rtc_tm_to_time64(&now) + max_day_forward * SEC_PER_DAY;
+ return rtc_tm_to_time64(tm) <= max_alarm_time64 ? 0 : -EINVAL;
}
static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
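
Editor's note: the rework above replaces the hand-rolled month/day comparison with a single timestamp comparison. The furthest valid alarm is "same day-of-month next month", clamped to the length of the next month, expressed as now plus max_day_forward days. The stand-alone program below works that arithmetic through for one date; it is an illustration, not patch code, and days_in_month() is a local stand-in for the kernel's rtc_month_days().

/*
 * Hedged stand-alone illustration of the alarm-window arithmetic above.
 * days_in_month() stands in for rtc_month_days(); months are 0-11 and
 * years are offsets from 1900, as in struct rtc_time.
 */
#include <stdio.h>

#define SEC_PER_DAY (24 * 60 * 60)

static int days_in_month(int month, int year)
{
	static const int days[12] = { 31, 28, 31, 30, 31, 30,
				      31, 31, 30, 31, 30, 31 };
	int y = year + 1900;
	int leap = (y % 4 == 0 && y % 100 != 0) || (y % 400 == 0);

	return days[month] + (month == 1 ? leap : 0);
}

int main(void)
{
	/* Example: now = Jan 31 2024 (tm_mon = 0, tm_mday = 31, tm_year = 124). */
	int tm_mon = 0, tm_mday = 31, tm_year = 124;
	int next_month = (tm_mon + 1) % 12;
	int next_year = tm_year + (tm_mon == 11);
	int next_days = days_in_month(next_month, next_year);
	int max_day_forward = days_in_month(tm_mon, tm_year) - tm_mday
			    + (next_days < tm_mday ? next_days : tm_mday);

	/* Jan has 31 days, Feb 2024 has 29 -> window is 0 + 29 = 29 days. */
	printf("alarm window: %d days (%d s)\n",
	       max_day_forward, max_day_forward * SEC_PER_DAY);
	return 0;
}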
@@ -469,17 +476,17 @@ static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned int cr, isr, alrmar;
int ret = 0;
- tm2bcd(tm);
-
/*
* RTC alarm can't be set on a specific date, unless this date is
* up to the same day of month next month.
*/
- if (stm32_rtc_valid_alrm(rtc, tm) < 0) {
+ if (stm32_rtc_valid_alrm(dev, tm) < 0) {
dev_err(dev, "Alarm can be set only on upcoming month.\n");
return -EINVAL;
}
+ tm2bcd(tm);
+
alrmar = 0;
/* tm_year and tm_mon are not used because not supported by RTC */
alrmar |= (tm->tm_mday << STM32_RTC_ALRMXR_DATE_SHIFT) &
@@ -545,6 +552,7 @@ static void stm32_rtc_clear_events(struct stm32_rtc *rtc,
static const struct stm32_rtc_data stm32_rtc_data = {
.has_pclk = false,
.need_dbp = true,
+ .need_accuracy = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -566,6 +574,7 @@ static const struct stm32_rtc_data stm32_rtc_data = {
static const struct stm32_rtc_data stm32h7_rtc_data = {
.has_pclk = true,
.need_dbp = true,
+ .need_accuracy = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -596,6 +605,7 @@ static void stm32mp1_rtc_clear_events(struct stm32_rtc *rtc,
static const struct stm32_rtc_data stm32mp1_data = {
.has_pclk = true,
.need_dbp = false,
+ .need_accuracy = true,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -628,7 +638,7 @@ static int stm32_rtc_init(struct platform_device *pdev,
const struct stm32_rtc_registers *regs = &rtc->data->regs;
unsigned int prer, pred_a, pred_s, pred_a_max, pred_s_max, cr;
unsigned int rate;
- int ret = 0;
+ int ret;
rate = clk_get_rate(rtc->rtc_ck);
@@ -636,18 +646,32 @@ static int stm32_rtc_init(struct platform_device *pdev,
pred_a_max = STM32_RTC_PRER_PRED_A >> STM32_RTC_PRER_PRED_A_SHIFT;
pred_s_max = STM32_RTC_PRER_PRED_S >> STM32_RTC_PRER_PRED_S_SHIFT;
- for (pred_a = pred_a_max; pred_a + 1 > 0; pred_a--) {
- pred_s = (rate / (pred_a + 1)) - 1;
+ if (rate > (pred_a_max + 1) * (pred_s_max + 1)) {
+ dev_err(&pdev->dev, "rtc_ck rate is too high: %dHz\n", rate);
+ return -EINVAL;
+ }
+
+ if (rtc->data->need_accuracy) {
+ for (pred_a = 0; pred_a <= pred_a_max; pred_a++) {
+ pred_s = (rate / (pred_a + 1)) - 1;
+
+ if (pred_s <= pred_s_max && ((pred_s + 1) * (pred_a + 1)) == rate)
+ break;
+ }
+ } else {
+ for (pred_a = pred_a_max; pred_a + 1 > 0; pred_a--) {
+ pred_s = (rate / (pred_a + 1)) - 1;
- if (((pred_s + 1) * (pred_a + 1)) == rate)
- break;
+ if (((pred_s + 1) * (pred_a + 1)) == rate)
+ break;
+ }
}
/*
* Can't find a 1Hz, so give priority to RTC power consumption
* by choosing the higher possible value for prediv_a
*/
- if ((pred_s > pred_s_max) || (pred_a > pred_a_max)) {
+ if (pred_s > pred_s_max || pred_a > pred_a_max) {
pred_a = pred_a_max;
pred_s = (rate / (pred_a + 1)) - 1;
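
Editor's note: with need_accuracy set, the loop above searches prediv_a upward from 0, favouring a large synchronous prescaler and hence finer sub-second resolution, while the legacy path searches downward from pred_a_max to minimise power. The sketch below runs both searches for the usual 32768 Hz LSE; it is an illustration, and the 7-bit/15-bit limits are assumptions mirroring typical STM32 RTC prescaler widths rather than values taken from this patch.

/*
 * Hedged illustration of the two prescaler searches above for a
 * 32768 Hz clock.  The width limits are assumptions, not patch code.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int rate = 32768;
	const unsigned int pred_a_max = 0x7f;	/* asynchronous, 7 bit */
	const unsigned int pred_s_max = 0x7fff;	/* synchronous, 15 bit */
	unsigned int pred_a, pred_s;

	/* Accuracy-first (need_accuracy): smallest pred_a that divides rate. */
	for (pred_a = 0; pred_a <= pred_a_max; pred_a++) {
		pred_s = rate / (pred_a + 1) - 1;
		if (pred_s <= pred_s_max && (pred_s + 1) * (pred_a + 1) == rate)
			break;
	}
	printf("accuracy:  pred_a=%u pred_s=%u\n", pred_a, pred_s);

	/* Power-first (legacy): largest pred_a that divides rate. */
	for (pred_a = pred_a_max; pred_a + 1 > 0; pred_a--) {
		pred_s = rate / (pred_a + 1) - 1;
		if ((pred_s + 1) * (pred_a + 1) == rate)
			break;
	}
	printf("low power: pred_a=%u pred_s=%u\n", pred_a, pred_s);
	return 0;
}

For 32768 Hz the accuracy path finds pred_a=0, pred_s=32767 and the legacy path finds pred_a=127, pred_s=255; both are exact 1 Hz splits, but the first trades higher current in the synchronous prescaler for finer granularity in the sub-second register.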
@@ -656,6 +680,20 @@ static int stm32_rtc_init(struct platform_device *pdev,
"fast" : "slow");
}
+ cr = readl_relaxed(rtc->base + regs->cr);
+
+ prer = readl_relaxed(rtc->base + regs->prer);
+ prer &= STM32_RTC_PRER_PRED_S | STM32_RTC_PRER_PRED_A;
+
+ pred_s = (pred_s << STM32_RTC_PRER_PRED_S_SHIFT) &
+ STM32_RTC_PRER_PRED_S;
+ pred_a = (pred_a << STM32_RTC_PRER_PRED_A_SHIFT) &
+ STM32_RTC_PRER_PRED_A;
+
+ /* quit if there is nothing to initialize */
+ if ((cr & STM32_RTC_CR_FMT) == 0 && prer == (pred_s | pred_a))
+ return 0;
+
stm32_rtc_wpr_unlock(rtc);
ret = stm32_rtc_enter_init_mode(rtc);
@@ -665,13 +703,10 @@ static int stm32_rtc_init(struct platform_device *pdev,
goto end;
}
- prer = (pred_s << STM32_RTC_PRER_PRED_S_SHIFT) & STM32_RTC_PRER_PRED_S;
- writel_relaxed(prer, rtc->base + regs->prer);
- prer |= (pred_a << STM32_RTC_PRER_PRED_A_SHIFT) & STM32_RTC_PRER_PRED_A;
- writel_relaxed(prer, rtc->base + regs->prer);
+ writel_relaxed(pred_s, rtc->base + regs->prer);
+ writel_relaxed(pred_a | pred_s, rtc->base + regs->prer);
/* Force 24h time format */
- cr = readl_relaxed(rtc->base + regs->cr);
cr &= ~STM32_RTC_CR_FMT;
writel_relaxed(cr, rtc->base + regs->cr);
@@ -730,16 +765,13 @@ static int stm32_rtc_probe(struct platform_device *pdev)
rtc->rtc_ck = devm_clk_get(&pdev->dev, NULL);
} else {
rtc->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(rtc->pclk)) {
- dev_err(&pdev->dev, "no pclk clock");
- return PTR_ERR(rtc->pclk);
- }
+ if (IS_ERR(rtc->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rtc->pclk), "no pclk clock");
+
rtc->rtc_ck = devm_clk_get(&pdev->dev, "rtc_ck");
}
- if (IS_ERR(rtc->rtc_ck)) {
- dev_err(&pdev->dev, "no rtc_ck clock");
- return PTR_ERR(rtc->rtc_ck);
- }
+ if (IS_ERR(rtc->rtc_ck))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rtc->rtc_ck), "no rtc_ck clock");
if (rtc->data->has_pclk) {
ret = clk_prepare_enable(rtc->pclk);
@@ -859,7 +891,6 @@ static void stm32_rtc_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, false);
}
-#ifdef CONFIG_PM_SLEEP
static int stm32_rtc_suspend(struct device *dev)
{
struct stm32_rtc *rtc = dev_get_drvdata(dev);
@@ -890,10 +921,10 @@ static int stm32_rtc_resume(struct device *dev)
return ret;
}
-#endif
-static SIMPLE_DEV_PM_OPS(stm32_rtc_pm_ops,
- stm32_rtc_suspend, stm32_rtc_resume);
+static const struct dev_pm_ops stm32_rtc_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_rtc_suspend, stm32_rtc_resume)
+};
static struct platform_driver stm32_rtc_driver = {
.probe = stm32_rtc_probe,
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 6f11b745f34d..7566d0a44af8 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/rtc.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/stmp_device.h>
#include <linux/stmp3xxx_rtc_wdt.h>
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 71548dd59a3a..8e0c66906103 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -847,8 +846,6 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
- dev_info(&pdev->dev, "RTC enabled\n");
-
return 0;
}
diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
index f33dc301f301..20c7e97c2fc8 100644
--- a/drivers/rtc/rtc-sunplus.c
+++ b/drivers/rtc/rtc-sunplus.c
@@ -244,7 +244,7 @@ static int sp_rtc_probe(struct platform_device *plat_dev)
sp_rtc->irq = platform_get_irq(plat_dev, 0);
if (sp_rtc->irq < 0)
- return dev_err_probe(&plat_dev->dev, sp_rtc->irq, "platform_get_irq failed\n");
+ return sp_rtc->irq;
ret = devm_request_irq(&plat_dev->dev, sp_rtc->irq, sp_rtc_irq_handler,
IRQF_TRIGGER_RISING, "rtc irq", plat_dev);
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index 5d019e3a835a..5cab9953c44f 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -14,8 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/types.h>
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
index 0d90fe923355..ec759d8f7023 100644
--- a/drivers/rtc/rtc-ti-k3.c
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/property.h>
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 9f14e2475747..20faf08c254c 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -252,6 +252,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
rtc->rtc->ops = &tps6586x_rtc_ops;
rtc->rtc->range_max = (1ULL << 30) - 1; /* 30-bit seconds */
+ rtc->rtc->alarm_offset_max = ALM1_VALID_RANGE_IN_SEC;
rtc->rtc->start_secs = mktime64(2009, 1, 1, 0, 0, 0);
rtc->rtc->set_start_time = true;
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 75e4c2d777b9..411ff66c0468 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -406,11 +406,8 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tps_rtc);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_warn(&pdev->dev, "Wake up is not possible as irq = %d\n",
- irq);
- return -ENXIO;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 81b36948c2fa..13f8ce08243c 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -487,11 +487,24 @@ static const struct rtc_class_ops twl_rtc_ops = {
.alarm_irq_enable = twl_rtc_alarm_irq_enable,
};
+static int twl_nvram_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ return twl_i2c_read((long)priv, val, offset, bytes);
+}
+
+static int twl_nvram_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ return twl_i2c_write((long)priv, val, offset, bytes);
+}
+
/*----------------------------------------------------------------------*/
static int twl_rtc_probe(struct platform_device *pdev)
{
struct twl_rtc *twl_rtc;
+ struct nvmem_config nvmem_cfg;
struct device_node *np = pdev->dev.of_node;
int ret = -EINVAL;
int irq = platform_get_irq(pdev, 0);
@@ -542,7 +555,6 @@ static int twl_rtc_probe(struct platform_device *pdev)
REG_INT_MSK_STS_A);
}
- dev_info(&pdev->dev, "Enabling TWL-RTC\n");
ret = twl_rtc_write_u8(twl_rtc, BIT_RTC_CTRL_REG_STOP_RTC_M,
REG_RTC_CTRL_REG);
if (ret < 0)
@@ -564,11 +576,8 @@ static int twl_rtc_probe(struct platform_device *pdev)
twl_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&twl_rtc_ops, THIS_MODULE);
- if (IS_ERR(twl_rtc->rtc)) {
- dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
- PTR_ERR(twl_rtc->rtc));
+ if (IS_ERR(twl_rtc->rtc))
return PTR_ERR(twl_rtc->rtc);
- }
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
twl_rtc_interrupt,
@@ -579,6 +588,30 @@ static int twl_rtc_probe(struct platform_device *pdev)
return ret;
}
+ memset(&nvmem_cfg, 0, sizeof(nvmem_cfg));
+ nvmem_cfg.name = "twl-secured-";
+ nvmem_cfg.type = NVMEM_TYPE_BATTERY_BACKED;
+	nvmem_cfg.reg_read = twl_nvram_read;
+	nvmem_cfg.reg_write = twl_nvram_write;
+ nvmem_cfg.word_size = 1;
+ nvmem_cfg.stride = 1;
+ if (twl_class_is_4030()) {
+ /* 20 bytes SECURED_REG area */
+ nvmem_cfg.size = 20;
+ nvmem_cfg.priv = (void *)TWL_MODULE_SECURED_REG;
+ devm_rtc_nvmem_register(twl_rtc->rtc, &nvmem_cfg);
+ /* 8 bytes BACKUP area */
+ nvmem_cfg.name = "twl-backup-";
+ nvmem_cfg.size = 8;
+ nvmem_cfg.priv = (void *)TWL4030_MODULE_BACKUP;
+ devm_rtc_nvmem_register(twl_rtc->rtc, &nvmem_cfg);
+ } else {
+ /* 8 bytes SECURED_REG area */
+ nvmem_cfg.size = 8;
+ nvmem_cfg.priv = (void *)TWL_MODULE_SECURED_REG;
+ devm_rtc_nvmem_register(twl_rtc->rtc, &nvmem_cfg);
+ }
+
return 0;
}
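
Editor's note: the probe above now exposes the TWL secured and backup register banks as battery-backed nvmem devices. Below is a hedged user-space sketch that dumps one of them; the device path is an assumption, since the nvmem core appends its own id to the "twl-secured-" name chosen in the driver.

/*
 * Hedged user-space sketch: dump the battery-backed area registered by
 * the patch above.  The path is an assumption -- check
 * /sys/bus/nvmem/devices/ for the actual device name on your board.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/nvmem/devices/twl-secured-0/nvmem", "rb");
	unsigned char buf[32];
	size_t n, i;

	if (!f) {
		perror("nvmem");
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);
	for (i = 0; i < n; i++)
		printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
	putchar('\n');
	fclose(f);
	return 0;
}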
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 947f8071803f..3c773cff2b39 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -386,8 +386,6 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
/* enable the RTC if it's not already enabled */
power5 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_5);
if (!(power5 & WM8350_RTC_TICK_ENA)) {
- dev_info(wm8350->dev, "Starting RTC\n");
-
wm8350_reg_unlock(wm8350);
ret = wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5,
@@ -426,11 +424,8 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
wm_rtc->rtc = devm_rtc_device_register(&pdev->dev, "wm8350",
&wm8350_rtc_ops, THIS_MODULE);
- if (IS_ERR(wm_rtc->rtc)) {
- ret = PTR_ERR(wm_rtc->rtc);
- dev_err(&pdev->dev, "failed to register RTC: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(wm_rtc->rtc))
+ return PTR_ERR(wm_rtc->rtc);
ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
wm8350_rtc_update_handler, 0,
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 620fab01b710..c4e36650c426 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1378,16 +1378,12 @@ static ssize_t dasd_vendor_show(struct device *dev,
static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
-#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 +\
- /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 +\
- /* vduit */ 32 + 1)
-
static ssize_t
dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
+ char uid_string[DASD_UID_STRLEN];
struct dasd_device *device;
struct dasd_uid uid;
- char uid_string[UID_STRLEN];
char ua_string[3];
device = dasd_device_from_cdev(to_ccwdev(dev));
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 8587e423169e..bd89b032968a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1079,12 +1079,12 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
create_uid(conf, &uid);
if (strlen(uid.vduit) > 0)
- snprintf(print_uid, sizeof(*print_uid),
+ snprintf(print_uid, DASD_UID_STRLEN,
"%s.%s.%04x.%02x.%s",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr, uid.vduit);
else
- snprintf(print_uid, sizeof(*print_uid),
+ snprintf(print_uid, DASD_UID_STRLEN,
"%s.%s.%04x.%02x",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr);
@@ -1093,8 +1093,8 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
static int dasd_eckd_check_cabling(struct dasd_device *device,
void *conf_data, __u8 lpm)
{
+ char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
struct dasd_eckd_private *private = device->private;
- char print_path_uid[60], print_device_uid[60];
struct dasd_conf path_conf;
path_conf.data = conf_data;
@@ -1293,9 +1293,9 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
struct dasd_conf_data *conf_data;
+ char print_uid[DASD_UID_STRLEN];
struct dasd_conf path_conf;
unsigned long flags;
- char print_uid[60];
int rc, pos;
opm = 0;
@@ -5855,8 +5855,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
static int dasd_eckd_reload_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
+ char print_uid[DASD_UID_STRLEN];
int rc, old_base;
- char print_uid[60];
struct dasd_uid uid;
unsigned long flags;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 0aa56351da72..8a4dbe9d7741 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -259,6 +259,10 @@ struct dasd_uid {
char vduit[33];
};
+#define DASD_UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 + \
+ /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 + \
+ /* vduit */ 32 + 1)
+
/*
* PPRC Status data
*/
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 06bcb6c78909..4b7ecd4fd431 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -411,13 +411,13 @@ removeseg:
segment_unload(entry->segment_name);
}
list_del(&dev_info->lh);
+ up_write(&dcssblk_devices_sem);
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
- up_write(&dcssblk_devices_sem);
if (device_remove_file_self(dev, attr)) {
device_unregister(dev);
@@ -790,18 +790,17 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
}
list_del(&dev_info->lh);
+ /* unload all related segments */
+ list_for_each_entry(entry, &dev_info->seg_list, lh)
+ segment_unload(entry->segment_name);
+ up_write(&dcssblk_devices_sem);
+
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
- /* unload all related segments */
- list_for_each_entry(entry, &dev_info->seg_list, lh)
- segment_unload(entry->segment_name);
-
- up_write(&dcssblk_devices_sem);
-
device_unregister(&dev_info->dev);
put_device(&dev_info->dev);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 9fa92e45e0ee..7207a7f5842a 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -111,7 +111,7 @@ static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
{
- return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
+ return *((u8 *)__va(mon_mca_start(monmsg)) + monmsg->mca_offset + index);
}
static inline u32 mon_mca_size(struct mon_msg *monmsg)
@@ -121,12 +121,12 @@ static inline u32 mon_mca_size(struct mon_msg *monmsg)
static inline u32 mon_rec_start(struct mon_msg *monmsg)
{
- return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
+ return *((u32 *)(__va(mon_mca_start(monmsg)) + monmsg->mca_offset + 4));
}
static inline u32 mon_rec_end(struct mon_msg *monmsg)
{
- return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
+ return *((u32 *)(__va(mon_mca_start(monmsg)) + monmsg->mca_offset + 8));
}
static int mon_check_mca(struct mon_msg *monmsg)
@@ -392,8 +392,7 @@ static ssize_t mon_read(struct file *filp, char __user *data,
mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
count = min(count, (size_t) mce_start + 12 - monmsg->pos);
- ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
- count);
+ ret = copy_to_user(data, __va(monmsg->pos), count);
if (ret)
return -EFAULT;
monmsg->pos += count;
@@ -406,8 +405,7 @@ static ssize_t mon_read(struct file *filp, char __user *data,
if (monmsg->pos <= mon_rec_end(monmsg)) {
count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
+ 1);
- ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
- count);
+ ret = copy_to_user(data, __va(monmsg->pos), count);
if (ret)
return -EFAULT;
monmsg->pos += count;
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 34967e67249e..a108f2bf5b33 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -49,8 +49,6 @@ int register_adapter_interrupt(struct airq_struct *airq)
return -ENOMEM;
airq->flags |= AIRQ_PTR_ALLOCATED;
}
- if (!airq->lsi_mask)
- airq->lsi_mask = 0xff;
snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
CIO_TRACE_EVENT(4, dbf_txt);
isc_register(airq->isc);
@@ -98,7 +96,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
head = &airq_lists[tpi_info->isc];
rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list)
- if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
+ if (*airq->lsi_ptr != 0)
airq->handler(airq, tpi_info);
rcu_read_unlock();
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 4b23c9f7f3e5..ce04caa7913f 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -366,7 +366,6 @@ static int zcdn_create(const char *name)
{
dev_t devt;
int i, rc = 0;
- char nodename[ZCDN_MAX_NAME];
struct zcdn_device *zcdndev;
if (mutex_lock_interruptible(&ap_perms_mutex))
@@ -407,13 +406,11 @@ static int zcdn_create(const char *name)
zcdndev->device.devt = devt;
zcdndev->device.groups = zcdn_dev_attr_groups;
if (name[0])
- strncpy(nodename, name, sizeof(nodename));
+ rc = dev_set_name(&zcdndev->device, "%s", name);
else
- snprintf(nodename, sizeof(nodename),
- ZCRYPT_NAME "_%d", (int)MINOR(devt));
- nodename[sizeof(nodename) - 1] = '\0';
- if (dev_set_name(&zcdndev->device, nodename)) {
- rc = -EINVAL;
+ rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
+ if (rc) {
+ kfree(zcdndev);
goto unlockout;
}
rc = device_register(&zcdndev->device);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index df782646e856..ab2f35bc294d 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -518,12 +518,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (port) {
put_device(&port->dev);
retval = -EEXIST;
- goto err_out;
+ goto err_put;
}
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
- goto err_out;
+ goto err_put;
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
@@ -546,7 +546,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
- goto err_out;
+ goto err_put;
}
retval = -EINVAL;
@@ -563,7 +563,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
return port;
-err_out:
+err_put:
zfcp_ccw_adapter_put(adapter);
+err_out:
return ERR_PTR(retval);
}
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 02922768b129..ac67576301bf 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -250,7 +250,6 @@ static struct airq_info *new_airq_info(int index)
info->airq.handler = virtio_airq_handler;
info->summary_indicator_idx = index;
info->airq.lsi_ptr = get_summary_indicator(info);
- info->airq.lsi_mask = 0xff;
info->airq.isc = VIRTIO_AIRQ_ISC;
rc = register_adapter_interrupt(&info->airq);
if (rc) {
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4962ce989113..695a57d894cd 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -836,7 +836,7 @@ config SCSI_IMM
config SCSI_IZIP_EPP16
bool "ppa/imm option - Use slow (but safe) EPP-16"
- depends on SCSI_PPA || SCSI_IMM
+ depends on SCSI_IMM
help
EPP (Enhanced Parallel Port) is a standard for parallel ports which
allows them to act as expansion buses that can handle up to 64
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 3f062e4013ab..013a9a334972 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1451,7 +1451,7 @@ retry_next:
#endif
break;
}
- scsi_rescan_device(&device->sdev_gendev);
+ scsi_rescan_device(device);
break;
default:
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
index 243adb0a38d1..a3f2357a3f08 100644
--- a/drivers/scsi/aic7xxx/aicasm/Makefile
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -61,23 +61,11 @@ $(OUTDIR)/aicdb.h:
clean:
rm -f $(clean-files)
-# Create a dependency chain in generated files
-# to avoid concurrent invocations of the single
-# rule that builds them all.
-$(OUTDIR)/aicasm_gram.c: $(OUTDIR)/aicasm_gram.h
$(OUTDIR)/aicasm_gram.c $(OUTDIR)/aicasm_gram.h: aicasm_gram.y
- $(YACC) $(YFLAGS) -b $(<:.y=) $<
- mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
- mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
-
-# Create a dependency chain in generated files
-# to avoid concurrent invocations of the single
-# rule that builds them all.
-$(OUTDIR)/aicasm_macro_gram.c: $(OUTDIR)/aicasm_macro_gram.h
+ $(YACC) $(YFLAGS) -b $(<:.y=) $< -o $(OUTDIR)/$(<:.y=.c)
+
$(OUTDIR)/aicasm_macro_gram.c $(OUTDIR)/aicasm_macro_gram.h: aicasm_macro_gram.y
- $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $<
- mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
- mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
+ $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $< -o $(OUTDIR)/$(<:.y=.c)
$(OUTDIR)/aicasm_scan.c: aicasm_scan.l
$(LEX) $(LFLAGS) -o $@ $<
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index 975fcfcc0d8f..2b44eb5702eb 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -52,6 +52,7 @@
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
+#include <ctype.h>
#include "aicasm_symbol.h"
#include "aicasm.h"
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 3dd110143471..9dda296c0152 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -28,7 +28,7 @@ static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
if (asd_ha->hw_prof.sas_addr[0])
return 0;
- return sas_request_addr(asd_ha->sas_ha.core.shost,
+ return sas_request_addr(asd_ha->sas_ha.shost,
asd_ha->hw_prof.sas_addr);
}
@@ -72,10 +72,8 @@ static int asd_init_phy(struct asd_phy *phy)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
sas_phy->enabled = 1;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index f7f81f6c3fbf..8a3340d8d7ad 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -667,7 +667,6 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
}
asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name;
- asd_ha->sas_ha.lldd_module = THIS_MODULE;
asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0];
for (i = 0; i < ASD_MAX_PHYS; i++) {
@@ -688,8 +687,8 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
err = sas_unregister_ha(&asd_ha->sas_ha);
- sas_remove_host(asd_ha->sas_ha.core.shost);
- scsi_host_put(asd_ha->sas_ha.core.shost);
+ sas_remove_host(asd_ha->sas_ha.shost);
+ scsi_host_put(asd_ha->sas_ha.shost);
kfree(asd_ha->sas_ha.sas_phy);
kfree(asd_ha->sas_ha.sas_port);
@@ -739,7 +738,7 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha;
- asd_ha->sas_ha.core.shost = shost;
+ asd_ha->sas_ha.shost = shost;
shost->transportt = aic94xx_transport_template;
shost->max_id = ~0;
shost->max_lun = ~0;
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 7f0208300110..4bfd03724ad6 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -388,14 +388,9 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
flags |= data_dir_flags[task->data_dir];
scb->ata_task.ata_flags = flags;
- scb->ata_task.retry_count = task->ata_task.retry_count;
+ scb->ata_task.retry_count = 0;
- flags = 0;
- if (task->ata_task.set_affil_pol)
- flags |= SET_AFFIL_POLICY;
- if (task->ata_task.stp_affil_pol)
- flags |= STP_AFFIL_POLICY;
- scb->ata_task.flags = flags;
+ scb->ata_task.flags = 0;
}
ascb->tasklet_complete = asd_task_tasklet_complete;
@@ -485,9 +480,6 @@ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
- if (task->ssp_task.enable_first_burst)
- scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
- scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 2cd12c7f06c6..a66221c3b72f 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1715,14 +1715,14 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
arcmsr_flush_adapter_cache(acb);
}
-static int arcmsr_module_init(void)
+static int __init arcmsr_module_init(void)
{
int error = 0;
error = pci_register_driver(&arcmsr_pci_driver);
return error;
}
-static void arcmsr_module_exit(void)
+static void __exit arcmsr_module_exit(void)
{
pci_unregister_driver(&arcmsr_pci_driver);
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 8aeaddc93b16..8d374ae863ba 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -450,6 +450,10 @@ int beiscsi_iface_set_param(struct Scsi_Host *shost,
}
nla_for_each_attr(attrib, data, dt_len, rm_len) {
+ /* ignore nla_type as it is never used */
+ if (nla_len(attrib) < sizeof(*iface_param))
+ return -EINVAL;
+
iface_param = nla_data(attrib);
if (iface_param->param_type != ISCSI_NET_PARAM)
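
The be2iscsi hunk rejects any netlink attribute whose payload is shorter than the structure it is about to be cast to, before nla_data() is dereferenced. A small userspace sketch of the same guard is below; it is not the beiscsi code path, and struct names here are invented for illustration.

#include <stdio.h>
#include <stdint.h>

struct iface_param { uint8_t param_type; uint8_t value[8]; };
struct attr { uint16_t len; unsigned char payload[32]; };

/* Reject a truncated attribute before treating its payload as a struct. */
static const struct iface_param *attr_data(const struct attr *a)
{
	if (a->len < sizeof(struct iface_param))
		return NULL;
	return (const struct iface_param *)a->payload;
}

int main(void)
{
	struct attr short_attr = { .len = 2 };
	struct attr ok_attr = { .len = sizeof(struct iface_param) };

	printf("short: %s, full: %s\n",
	       attr_data(&short_attr) ? "accepted" : "rejected",
	       attr_data(&ok_attr) ? "accepted" : "rejected");
	return 0;
}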
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index a12d693065ce..1091aa428533 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -800,7 +800,7 @@ struct fc_rscn_pl_s {
u8 command;
u8 pagelen;
__be16 payldlen;
- struct fc_rscn_event_s event[1];
+ struct fc_rscn_event_s event[];
};
/*
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 773c84af784c..52303e8c716d 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1051,7 +1051,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
rscn->event[0].portid = s_id;
- return sizeof(struct fc_rscn_pl_s);
+ return struct_size(rscn, event, 1);
}
u16
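
The bfa hunks convert a one-element trailing array to a C99 flexible array member and size the RSCN payload with struct_size() instead of a bare sizeof. The sketch below shows the arithmetic that replaces; in the kernel, struct_size() additionally saturates on overflow, which this plain userspace version does not attempt.

#include <stdio.h>
#include <stdlib.h>

struct rscn_event { unsigned int format; unsigned int portid; };

struct rscn_pl {
	unsigned char command;
	unsigned char pagelen;
	unsigned short payldlen;
	struct rscn_event event[];	/* was event[1] */
};

int main(void)
{
	size_t n = 1;
	/* struct_size(pl, event, n) ~= header size + n trailing elements */
	size_t len = sizeof(struct rscn_pl) + n * sizeof(struct rscn_event);
	struct rscn_pl *pl = calloc(1, len);

	if (!pl)
		return 1;
	pl->event[0].portid = 0xfffc01;
	printf("payload length for %zu event(s): %zu bytes\n", n, len);
	free(pl);
	return 0;
}

With event[1], sizeof(struct fc_rscn_pl_s) silently counted one element; with a flexible array it counts none, so the length has to be computed explicitly, which is exactly what the fc_rscn_build() change does.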
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 8f96049f62dd..5e7fb110bc3f 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -2317,12 +2317,8 @@ sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf,
SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT;
bls->context_tag = cpu_to_le16(params->vpi);
- if (params->s_id != U32_MAX)
- bls->local_n_port_id_dword |=
- cpu_to_le32(params->s_id & 0x00ffffff);
- else
- bls->local_n_port_id_dword |=
- cpu_to_le32(params->s_id & 0x00ffffff);
+ bls->local_n_port_id_dword |=
+ cpu_to_le32(params->s_id & 0x00ffffff);
dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) |
(params->d_id & SLI4_BLS_RSP_RID);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5c8d1ba3f8f3..19eee108db02 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -319,16 +319,17 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *sel;
struct fcoe_fcf *fcf;
+ unsigned long flags;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
kfree_skb(fip->flogi_req);
fip->flogi_req = NULL;
list_for_each_entry(fcf, &fip->fcfs, list)
fcf->flogi_sent = 0;
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
sel = fip->sel_fcf;
if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
@@ -699,6 +700,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
{
struct fc_frame *fp;
struct fc_frame_header *fh;
+ unsigned long flags;
u16 old_xid;
u8 op;
u8 mac[ETH_ALEN];
@@ -732,11 +734,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
op = FIP_DT_FLOGI;
if (fip->mode == FIP_MODE_VN2VN)
break;
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
kfree_skb(fip->flogi_req);
fip->flogi_req = skb;
fip->flogi_req_send = 1;
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
schedule_work(&fip->timer_work);
return -EINPROGRESS;
case ELS_FDISC:
@@ -1705,10 +1707,11 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
+ unsigned long flags;
int error;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
fcf = fcoe_ctlr_select(fip);
if (!fcf || fcf->flogi_sent) {
@@ -1719,7 +1722,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
fcoe_ctlr_solicit(fip, NULL);
error = fcoe_ctlr_flogi_send_locked(fip);
}
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
mutex_unlock(&fip->ctlr_mutex);
return error;
}
@@ -1736,8 +1739,9 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
+ unsigned long flags;
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
fcf = fip->sel_fcf;
if (!fcf || !fip->flogi_req_send)
goto unlock;
@@ -1764,7 +1768,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
} else /* XXX */
LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
}
/**
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index e51e92f932fa..22cef283b2b9 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,7 +27,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.55"
+#define DRV_VERSION "1.6.0.57"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -236,6 +236,9 @@ struct fnic {
unsigned int wq_count;
unsigned int cq_count;
+ struct mutex sgreset_mutex;
+ spinlock_t sgreset_lock; /* lock for sgreset */
+ struct scsi_cmnd *sgreset_sc;
struct dentry *fnic_stats_debugfs_host;
struct dentry *fnic_stats_debugfs_file;
struct dentry *fnic_reset_debugfs_file;
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index f4c8769df312..5895ead20e14 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -52,6 +52,8 @@ struct fnic_io_req {
unsigned long start_time; /* in jiffies */
struct completion *abts_done; /* completion for abts */
struct completion *dr_done; /* completion for device reset */
+ unsigned int tag;
+ struct scsi_cmnd *sc; /* midlayer's cmd pointer */
};
enum fnic_port_speeds {
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 984bc5fc55e2..f27f9319e0b2 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -754,6 +754,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < FNIC_IO_LOCKS; i++)
spin_lock_init(&fnic->io_req_lock[i]);
+ spin_lock_init(&fnic->sgreset_lock);
+
err = -ENOMEM;
fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
if (!fnic->io_req_pool)
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index be89ce96df46..416d81954819 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1047,9 +1047,9 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
{
u8 type;
u8 hdr_status;
- struct fcpio_tag tag;
+ struct fcpio_tag ftag;
u32 id;
- struct scsi_cmnd *sc;
+ struct scsi_cmnd *sc = NULL;
struct fnic_io_req *io_req;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
@@ -1058,27 +1058,43 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
unsigned long flags;
spinlock_t *io_lock;
unsigned long start_time;
+ unsigned int tag;
- fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
- fcpio_tag_id_dec(&tag, &id);
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
+ fcpio_tag_id_dec(&ftag, &id);
- if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
+ tag = id & FNIC_TAG_MASK;
+ if (tag == fnic->fnic_max_tag_id) {
+ if (!(id & FNIC_TAG_DEV_RST)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range id 0x%x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+ } else if (tag > fnic->fnic_max_tag_id) {
shost_printk(KERN_ERR, fnic->lport->host,
- "Tag out of range tag %x hdr status = %s\n",
- id, fnic_fcpio_status_to_str(hdr_status));
+ "Tag out of range tag 0x%x hdr status = %s\n",
+ tag, fnic_fcpio_status_to_str(hdr_status));
return;
}
- sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+ if ((tag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) {
+ sc = fnic->sgreset_sc;
+ io_lock = &fnic->sgreset_lock;
+ } else {
+ sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ }
+
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
- fnic_fcpio_status_to_str(hdr_status), id);
+ fnic_fcpio_status_to_str(hdr_status), tag);
return;
}
- io_lock = fnic_io_lock_hash(fnic, sc);
+
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
@@ -1089,7 +1105,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
- fnic_fcpio_status_to_str(hdr_status), id, sc);
+ fnic_fcpio_status_to_str(hdr_status), tag, sc);
return;
}
start_time = io_req->start_time;
@@ -1938,6 +1954,10 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
+ unsigned int tag = scsi_cmd_to_rq(sc)->tag;
+
+ if (tag == SCSI_NO_TAG)
+ tag = io_req->tag;
spin_lock_irqsave(host->host_lock, intr_flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
@@ -1964,7 +1984,8 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
/* fill in the lun info */
int_to_scsilun(sc->device->lun, &fc_lun);
- fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
+ tag |= FNIC_TAG_DEV_RST;
+ fnic_queue_wq_copy_desc_itmf(wq, tag,
0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
@@ -2146,8 +2167,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
.ret = SUCCESS,
};
- if (new_sc)
- iter_data.lr_sc = lr_sc;
+ iter_data.lr_sc = lr_sc;
scsi_host_busy_iter(fnic->lport->host,
fnic_pending_aborts_iter, &iter_data);
@@ -2168,39 +2188,6 @@ clean_pending_aborts_end:
}
/*
- * fnic_scsi_host_start_tag
- * Allocates tagid from host's tag list
- **/
-static inline int
-fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
-{
- struct request *rq = scsi_cmd_to_rq(sc);
- struct request_queue *q = rq->q;
- struct request *dummy;
-
- dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(dummy))
- return SCSI_NO_TAG;
-
- rq->tag = dummy->tag;
- sc->host_scribble = (unsigned char *)dummy;
-
- return dummy->tag;
-}
-
-/*
- * fnic_scsi_host_end_tag
- * frees tag allocated by fnic_scsi_host_start_tag.
- **/
-static inline void
-fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
-{
- struct request *dummy = (struct request *)sc->host_scribble;
-
- blk_mq_free_request(dummy);
-}
-
-/*
* SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI command
* on the LUN.
@@ -2222,7 +2209,6 @@ int fnic_device_reset(struct scsi_cmnd *sc)
struct reset_stats *reset_stats;
int tag = rq->tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
- int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
bool new_sc = 0;
/* Wait for rport to unblock */
@@ -2252,20 +2238,26 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
- /* Allocate tag if not present */
if (unlikely(tag < 0)) {
/*
- * Really should fix the midlayer to pass in a proper
- * request for ioctls...
+ * For device reset issued through sg3utils, we let
+ * only one LUN_RESET to go through and use a special
+ * tag equal to max_tag_id so that we don't have to allocate
+ * or free it. It won't interact with tags
+ * allocated by mid layer.
*/
- tag = fnic_scsi_host_start_tag(fnic, sc);
- if (unlikely(tag == SCSI_NO_TAG))
- goto fnic_device_reset_end;
- tag_gen_flag = 1;
+ mutex_lock(&fnic->sgreset_mutex);
+ tag = fnic->fnic_max_tag_id;
new_sc = 1;
- }
- io_lock = fnic_io_lock_hash(fnic, sc);
+ fnic->sgreset_sc = sc;
+ io_lock = &fnic->sgreset_lock;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fcid: 0x%x lun: 0x%llx flags: 0x%x tag: 0x%x Issuing sgreset\n",
+ rport->port_id, sc->device->lun, fnic_priv(sc)->flags, tag);
+ } else
+ io_lock = fnic_io_lock_hash(fnic, sc);
+
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
@@ -2281,6 +2273,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
memset(io_req, 0, sizeof(*io_req));
io_req->port_id = rport->port_id;
+ io_req->tag = tag;
+ io_req->sc = sc;
fnic_priv(sc)->io_req = io_req;
}
io_req->dr_done = &tm_done;
@@ -2434,9 +2428,10 @@ fnic_device_reset_end:
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
- /* free tag if it is allocated */
- if (unlikely(tag_gen_flag))
- fnic_scsi_host_end_tag(fnic, sc);
+ if (new_sc) {
+ fnic->sgreset_sc = NULL;
+ mutex_unlock(&fnic->sgreset_mutex);
+ }
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index d2eddad099a2..0420bfe9bd42 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -50,11 +50,6 @@ static irqreturn_t gvp11_intr(int irq, void *data)
static int gvp11_xfer_mask = 0;
-void gvp11_setup(char *str, int *ints)
-{
- gvp11_xfer_mask = ints[1];
-}
-
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 8f22ece957bd..9472b9743aef 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -787,7 +787,7 @@ static int hisi_sas_init_device(struct domain_device *device)
* However we don't need to issue a hard reset here for these
* reasons:
* a. When probing the device, libsas/libata already issues a
- * hard reset in sas_probe_sata() -> ata_sas_async_probe().
+ * hard reset in sas_probe_sata() -> ata_port_probe().
* Note that in hisi_sas_debug_I_T_nexus_reset() we take care
* to issue a hard reset by checking the dev status (== INIT).
* b. When resetting the controller, this is simply unnecessary.
@@ -1018,10 +1018,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -1065,23 +1063,18 @@ EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
- struct sas_ha_struct *sas_ha = sas_phy->ha;
- struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
struct hisi_sas_port *port;
- unsigned long flags;
if (!sas_port)
return;
port = to_hisi_sas_port(sas_port);
- spin_lock_irqsave(&hisi_hba->lock, flags);
port->port_attached = 1;
port->id = phy->port_id;
phy->port = port;
sas_port->lldd_port = port;
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
@@ -2519,10 +2512,9 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_ha_name = DRV_NAME;
sha->dev = hisi_hba->dev;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &hisi_hba->sas_addr[0];
sha->num_phys = hisi_hba->n_phy;
- sha->core.shost = hisi_hba->shost;
+ sha->shost = hisi_hba->shost;
for (i = 0; i < hisi_hba->n_phy; i++) {
sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
@@ -2564,12 +2556,12 @@ void hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- struct Scsi_Host *shost = sha->core.shost;
+ struct Scsi_Host *shost = sha->shost;
del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
- sas_remove_host(sha->core.shost);
+ sas_remove_host(shost);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 94fbbceddc2e..3c555579f9a1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -960,7 +960,7 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
- u8 *buf_cmd, fburst = 0;
+ u8 *buf_cmd;
u32 dw1, dw2;
/* create header */
@@ -1018,16 +1018,11 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
sizeof(struct ssp_frame_hdr);
- if (task->ssp_task.enable_first_burst) {
- fburst = (1 << 7);
- dw2 |= 1 << CMD_HDR_FIRST_BURST_OFF;
- }
hdr->dw2 = cpu_to_le32(dw2);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!tmf) {
- buf_cmd[9] = fburst | task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
+ buf_cmd[9] = task->ssp_task.task_attr;
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
} else {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 87d8e408ccd1..73b378837da7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1798,8 +1798,7 @@ static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!tmf) {
- buf_cmd[9] = task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
+ buf_cmd[9] = task->ssp_task.task_attr;
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
} else {
@@ -2026,6 +2025,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
+ struct hisi_sas_complete_v2_hdr *complete_queue =
+ hisi_hba->complete_hdr[slot->cmplt_queue];
+ struct hisi_sas_complete_v2_hdr *complete_hdr =
+ &complete_queue[slot->cmplt_queue_slot];
+ u32 dw0 = le32_to_cpu(complete_hdr->dw0);
int error = -1;
if (err_phase == 1) {
@@ -2310,7 +2314,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
break;
}
}
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
}
break;
default:
@@ -2443,7 +2448,8 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
ts->stat = SAS_SAM_STAT_GOOD;
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
break;
}
default:
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 20e1607c6282..bbb64ee6afd7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1326,7 +1326,7 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!tmf) {
- buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
+ buf_cmd[9] = ssp_task->task_attr;
memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
} else {
buf_cmd[10] = tmf->tmf;
@@ -2257,7 +2257,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
}
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
break;
case SAS_PROTOCOL_SMP:
ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
@@ -2384,7 +2385,8 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
ts->stat = SAS_SAM_STAT_GOOD;
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
break;
default:
ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
@@ -3104,21 +3106,25 @@ static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
{
- set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
-
- hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+ struct Scsi_Host *shost = hisi_hba->shost;
+ scsi_block_requests(shost);
wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
hisi_sas_sync_cqs(hisi_hba);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
}
static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
{
+ struct Scsi_Host *shost = hisi_hba->shost;
+
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
(u32)((1ULL << hisi_hba->queue_count) - 1));
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
}
static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
@@ -4576,7 +4582,7 @@ static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p)
debugfs_read_fifo_data_v3_hw(phy);
debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4,
- phy->fifo.rd_data);
+ (__le32 *)phy->fifo.rd_data);
return 0;
}
@@ -4950,7 +4956,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
+ sha->shost = shost;
sha->lldd_ha = hisi_hba;
shost->transportt = hisi_sas_stt;
@@ -4967,7 +4973,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_ha_name = DRV_NAME;
sha->dev = dev;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &hisi_hba->sas_addr[0];
sha->num_phys = hisi_hba->n_phy;
@@ -5055,14 +5060,14 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct device *dev = &pdev->dev;
struct sas_ha_struct *sha = dev_get_drvdata(dev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- struct Scsi_Host *shost = sha->core.shost;
+ struct Scsi_Host *shost = sha->shost;
pm_runtime_get_noresume(dev);
del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
flush_workqueue(hisi_hba->wq);
- sas_remove_host(sha->core.shost);
+ sas_remove_host(shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
hisi_sas_free(hisi_hba);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 198edf03f929..d7f51b84f3c7 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -537,7 +537,7 @@ EXPORT_SYMBOL(scsi_host_alloc);
static int __scsi_host_match(struct device *dev, const void *data)
{
struct Scsi_Host *p;
- const unsigned short *hostnum = data;
+ const unsigned int *hostnum = data;
p = class_to_shost(dev);
return p->host_no == *hostnum;
@@ -554,7 +554,7 @@ static int __scsi_host_match(struct device *dev, const void *data)
* that scsi_host_get() took. The put_device() below dropped
* the reference from class_find_device().
**/
-struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
+struct Scsi_Host *scsi_host_lookup(unsigned int hostnum)
{
struct device *cdev;
struct Scsi_Host *shost = NULL;
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 6bc3f022630a..52388374cf31 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -306,7 +306,7 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
{
- return ihost->sas_ha.core.shost;
+ return ihost->sas_ha.shost;
}
#define for_each_isci_host(id, ihost, pdev) \
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index ac1e04b86d8f..6277162a028b 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -250,7 +250,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
return -ENOMEM;
sas_ha->sas_ha_name = DRV_NAME;
- sas_ha->lldd_module = THIS_MODULE;
sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
for (i = 0; i < SCI_MAX_PHYS; i++) {
@@ -264,9 +263,7 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
sas_ha->strict_wide_ports = 1;
- sas_register_ha(sas_ha);
-
- return 0;
+ return sas_register_ha(sas_ha);
}
static void isci_unregister(struct isci_host *isci_host)
@@ -575,7 +572,7 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
goto err_shost;
SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
- ihost->sas_ha.core.shost = shost;
+ ihost->sas_ha.shost = shost;
shost->transportt = isci_transport_template;
shost->max_id = ~0;
@@ -730,7 +727,7 @@ static int isci_resume(struct device *dev)
sas_prep_resume_ha(&ihost->sas_ha);
isci_host_init(ihost);
- isci_host_start(ihost->sas_ha.core.shost);
+ isci_host_start(ihost->sas_ha.shost);
wait_for_start(ihost);
sas_resume_ha(&ihost->sas_ha);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index aa8787343e83..743a3c64b0da 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -1404,10 +1404,8 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
iphy->sas_phy.ha = &ihost->sas_ha;
iphy->sas_phy.lldd_phy = iphy;
iphy->sas_phy.enabled = 1;
- iphy->sas_phy.class = SAS;
iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
iphy->sas_phy.tproto = 0;
- iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
iphy->sas_phy.role = PHY_ROLE_INITIATOR;
iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 6370cdbfba08..a7b3243b471d 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -180,7 +180,7 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
cmd_iu->_r_a = 0;
cmd_iu->_r_b = 0;
cmd_iu->en_fburst = 0; /* unsupported */
- cmd_iu->task_prio = task->ssp_task.task_prio;
+ cmd_iu->task_prio = 0;
cmd_iu->task_attr = task->ssp_task.task_attr;
cmd_iu->_r_c = 0;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 9ab8555180a3..8e14cea15f98 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -724,6 +724,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
return -EEXIST;
}
+ err = -EINVAL;
+ if (!sk_is_tcp(sock->sk))
+ goto free_socket;
+
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
goto free_socket;
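
The iscsi_tcp hunk refuses to bind a connection to a socket that is not TCP, failing with -EINVAL before iscsi_conn_bind() runs. The userspace sketch below performs a comparable transport check with getsockopt(); it is only an analogy for the in-kernel sk_is_tcp() test, and SO_PROTOCOL is a Linux-specific option.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int socket_is_tcp(int fd)
{
	int type = 0, proto = 0;
	socklen_t len = sizeof(int);

	if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len))
		return 0;
	len = sizeof(int);
	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &proto, &len))
		return 0;
	return type == SOCK_STREAM && proto == IPPROTO_TCP;
}

int main(void)
{
	int tcp = socket(AF_INET, SOCK_STREAM, 0);
	int udp = socket(AF_INET, SOCK_DGRAM, 0);

	printf("tcp socket accepted: %d, udp socket accepted: %d\n",
	       socket_is_tcp(tcp), socket_is_tcp(udp));
	return 0;
}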
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 77714a495cbb..12e2653846e3 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -162,7 +162,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct domain_device *dev = ap->private_data;
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *host = sas_ha->core.shost;
+ struct Scsi_Host *host = sas_ha->shost;
struct sas_internal *i = to_sas_internal(host->transportt);
/* TODO: we should try to remove that unlock */
@@ -201,12 +201,14 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->data_dir = qc->dma_dir;
}
task->scatter = qc->sg;
- task->ata_task.retry_count = 1;
qc->lldd_task = task;
task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);
+ if (qc->flags & ATA_QCFLAG_RESULT_TF)
+ task->ata_task.return_fis_on_success = 1;
+
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, task);
@@ -235,7 +237,7 @@ static void sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
{
- return to_sas_internal(dev->port->ha->core.shost->transportt);
+ return to_sas_internal(dev->port->ha->shost->transportt);
}
static int sas_get_ata_command_set(struct domain_device *dev)
@@ -565,8 +567,6 @@ static struct ata_port_operations sas_sata_ops = {
.qc_prep = ata_noop_qc_prep,
.qc_issue = sas_ata_qc_issue,
.qc_fill_rtf = sas_ata_qc_fill_rtf,
- .port_start = ata_sas_port_start,
- .port_stop = ata_sas_port_stop,
.set_dmamode = sas_ata_set_dmamode,
.sched_eh = sas_ata_sched_eh,
.end_eh = sas_ata_end_eh,
@@ -584,7 +584,7 @@ static struct ata_port_info sata_port_info = {
int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
- struct Scsi_Host *shost = ha->core.shost;
+ struct Scsi_Host *shost = ha->shost;
struct ata_host *ata_host;
struct ata_port *ap;
int rc;
@@ -607,9 +607,6 @@ int sas_ata_init(struct domain_device *found_dev)
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
- rc = ata_sas_port_init(ap);
- if (rc)
- goto destroy_port;
rc = ata_sas_tport_add(ata_host->dev, ap);
if (rc)
@@ -621,7 +618,7 @@ int sas_ata_init(struct domain_device *found_dev)
return 0;
destroy_port:
- ata_sas_port_destroy(ap);
+ kfree(ap);
free_host:
ata_host_put(ata_host);
return rc;
@@ -655,7 +652,7 @@ void sas_probe_sata(struct asd_sas_port *port)
if (!dev_is_sata(dev))
continue;
- ata_sas_async_probe(dev->sata_dev.ap);
+ ata_port_probe(dev->sata_dev.ap);
}
mutex_unlock(&port->ha->disco_mutex);
@@ -822,7 +819,7 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
struct sas_ha_struct *ha = dev->port->ha;
sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
- ata_scsi_port_error_handler(ha->core.shost, ap);
+ ata_scsi_port_error_handler(ha->shost, ap);
sas_put_device(dev);
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 8c6afe724944..ff7b63b10aeb 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -170,7 +170,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
{
int res = 0;
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *shost = sas_ha->core.shost;
+ struct Scsi_Host *shost = sas_ha->shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (!i->dft->lldd_dev_found)
@@ -192,7 +192,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
void sas_notify_lldd_dev_gone(struct domain_device *dev)
{
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *shost = sas_ha->core.shost;
+ struct Scsi_Host *shost = sas_ha->shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (!i->dft->lldd_dev_gone)
@@ -234,7 +234,7 @@ static void sas_suspend_devices(struct work_struct *work)
struct domain_device *dev;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
- struct Scsi_Host *shost = port->ha->core.shost;
+ struct Scsi_Host *shost = port->ha->shost;
struct sas_internal *si = to_sas_internal(shost->transportt);
clear_bit(DISCE_SUSPEND, &port->disc.pending);
@@ -301,7 +301,7 @@ void sas_free_device(struct kref *kref)
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
- ata_sas_port_destroy(dev->sata_dev.ap);
+ kfree(dev->sata_dev.ap);
ata_host_put(dev->sata_dev.ata_host);
dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
@@ -373,7 +373,7 @@ static bool sas_abort_cmd(struct request *req, void *data)
static void sas_abort_device_scsi_cmds(struct domain_device *dev)
{
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *shost = sas_ha->core.shost;
+ struct Scsi_Host *shost = sas_ha->shost;
if (dev_is_expander(dev->dev_type))
return;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index adcac57aaee6..a2204674b680 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -37,7 +37,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
int res, retry;
struct sas_task *task = NULL;
struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
+ to_sas_internal(dev->port->ha->shost->transportt);
struct sas_ha_struct *ha = dev->port->ha;
pm_runtime_get_sync(ha->dev);
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 32cdc969b736..2ecb8535634c 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -114,7 +114,7 @@ static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 reg_type, u8 reg_index, u8 reg_count,
u8 *req_data)
{
- struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt);
+ struct sas_internal *i = to_sas_internal(sas_ha->shost->transportt);
int written;
if (i->dft->lldd_write_gpio == NULL) {
@@ -182,7 +182,7 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
enum sas_linkrate max, u8 *resp_data)
{
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
struct sas_phy_linkrates rates;
struct asd_sas_phy *asd_phy;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index f2c05ebeb72f..8586dc79f2a0 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -183,7 +183,7 @@ static int sas_get_linkerrors(struct sas_phy *phy)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
}
@@ -232,7 +232,7 @@ static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
return 0;
@@ -266,7 +266,7 @@ int sas_phy_enable(struct sas_phy *phy, int enable)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
if (enable)
ret = transport_sas_phy_reset(phy, 0);
@@ -303,7 +303,7 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
} else {
@@ -339,7 +339,7 @@ int sas_set_phy_speed(struct sas_phy *phy,
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
rates);
@@ -438,7 +438,7 @@ static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
/* all phys are back up or timed out, turn on i/o so we can
* flush out disks that did not return
*/
- scsi_unblock_requests(ha->core.shost);
+ scsi_unblock_requests(ha->shost);
if (drain)
sas_drain_work(ha);
clear_bit(SAS_HA_RESUMING, &ha->state);
@@ -468,7 +468,7 @@ void sas_suspend_ha(struct sas_ha_struct *ha)
int i;
sas_disable_events(ha);
- scsi_block_requests(ha->core.shost);
+ scsi_block_requests(ha->shost);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
@@ -641,7 +641,7 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy,
struct asd_sas_event *event;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
if (!event)
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 6f593fa69b58..a6dc7dc07fce 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -41,13 +41,7 @@ struct sas_phy_data {
void sas_scsi_recover_host(struct Scsi_Host *shost);
-int sas_show_class(enum sas_class class, char *buf);
-int sas_show_proto(enum sas_protocol proto, char *buf);
-int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
-int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
-
int sas_register_phys(struct sas_ha_struct *sas_ha);
-void sas_unregister_phys(struct sas_ha_struct *sas_ha);
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags);
void sas_free_event(struct asd_sas_event *event);
@@ -91,7 +85,6 @@ int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_device_type *type);
int sas_try_ata_reset(struct asd_sas_phy *phy);
-void sas_hae_reset(struct work_struct *work);
void sas_free_device(struct kref *kref);
void sas_destruct_devices(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index a0d592d11dfb..57494ac97076 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -38,7 +38,7 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
sas_deform_port(phy, 1);
@@ -66,7 +66,7 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
@@ -95,7 +95,7 @@ static void sas_phye_shutdown(struct work_struct *work)
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
if (phy->enabled) {
int ret;
@@ -131,7 +131,7 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
spin_lock_init(&phy->sas_prim_lock);
phy->frame_rcvd_size = 0;
- phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i);
+ phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i);
if (!phy->phy)
return -ENOMEM;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 11599c0e3fc3..e3f2ed913419 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -28,7 +28,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
struct domain_device *dev, *n;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
- struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
+ struct sas_internal *si = to_sas_internal(sas_ha->shost->transportt);
if (si->dft->lldd_port_formed)
si->dft->lldd_port_formed(phy);
@@ -83,7 +83,6 @@ static void sas_form_port_add_phy(struct asd_sas_port *port,
memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
if (*(u64 *)port->attached_sas_addr == 0) {
- port->class = phy->class;
memcpy(port->attached_sas_addr, phy->attached_sas_addr,
SAS_ADDR_SIZE);
port->iproto = phy->iproto;
@@ -109,7 +108,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
struct asd_sas_port *port = phy->port;
struct domain_device *port_dev = NULL;
struct sas_internal *si =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
unsigned long flags;
if (port) {
@@ -212,7 +211,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
struct domain_device *dev;
unsigned long flags;
@@ -249,7 +248,6 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
INIT_LIST_HEAD(&port->phy_list);
memset(port->sas_addr, 0, SAS_ADDR_SIZE);
memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE);
- port->class = 0;
port->iproto = 0;
port->tproto = 0;
port->oob_mode = 0;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 94c5f14f3c16..9047cfcd1072 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -142,7 +142,6 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
task->dev = dev;
task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
- task->ssp_task.retry_count = 1;
int_to_scsilun(cmd->device->lun, &lun);
memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
@@ -279,7 +278,7 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
unsigned long flags;
int i, res;
struct sas_internal *si =
- to_sas_internal(task->dev->port->ha->core.shost->transportt);
+ to_sas_internal(task->dev->port->ha->shost->transportt);
for (i = 0; i < 5; i++) {
pr_notice("%s: aborting task 0x%p\n", __func__, task);
@@ -327,7 +326,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
int res = TMF_RESP_FUNC_FAILED;
struct scsi_lun lun;
struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
+ to_sas_internal(dev->port->ha->shost->transportt);
int_to_scsilun(cmd->device->lun, &lun);
@@ -355,7 +354,7 @@ static int sas_recover_I_T(struct domain_device *dev)
{
int res = TMF_RESP_FUNC_FAILED;
struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
+ to_sas_internal(dev->port->ha->shost->transportt);
pr_notice("I_T nexus reset for dev %016llx\n",
SAS_ADDR(dev->sas_addr));
@@ -387,37 +386,7 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev)
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
-static void sas_wait_eh(struct domain_device *dev)
-{
- struct sas_ha_struct *ha = dev->port->ha;
- DEFINE_WAIT(wait);
-
- if (dev_is_sata(dev)) {
- ata_port_wait_eh(dev->sata_dev.ap);
- return;
- }
- retry:
- spin_lock_irq(&ha->lock);
-
- while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
- prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&ha->lock);
- schedule();
- spin_lock_irq(&ha->lock);
- }
- finish_wait(&ha->eh_wait_q, &wait);
-
- spin_unlock_irq(&ha->lock);
-
- /* make sure SCSI EH is complete */
- if (scsi_host_in_recovery(ha->core.shost)) {
- msleep(10);
- goto retry;
- }
-}
-
-static int sas_queue_reset(struct domain_device *dev, int reset_type,
- u64 lun, int wait)
+static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
{
struct sas_ha_struct *ha = dev->port->ha;
int scheduled = 0, tries = 100;
@@ -425,8 +394,6 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
/* ata: promote lun reset to bus reset */
if (dev_is_sata(dev)) {
sas_ata_schedule_reset(dev);
- if (wait)
- sas_ata_wait_eh(dev);
return SUCCESS;
}
@@ -440,13 +407,10 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
set_bit(SAS_DEV_EH_PENDING, &dev->state);
set_bit(reset_type, &dev->state);
int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
- scsi_schedule_eh(ha->core.shost);
+ scsi_schedule_eh(ha->shost);
}
spin_unlock_irq(&ha->lock);
- if (wait)
- sas_wait_eh(dev);
-
if (scheduled)
return SUCCESS;
}
@@ -499,7 +463,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
struct sas_internal *i = to_sas_internal(host->transportt);
if (current != host->ehandler)
- return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
+ return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);
int_to_scsilun(cmd->device->lun, &lun);
@@ -522,7 +486,7 @@ int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
struct sas_internal *i = to_sas_internal(host->transportt);
if (current != host->ehandler)
- return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
+ return sas_queue_reset(dev, SAS_DEV_RESET, 0);
if (!i->dft->lldd_I_T_nexus_reset)
return FAILED;
@@ -925,7 +889,7 @@ static int sas_execute_internal_abort(struct domain_device *device,
unsigned int qid, void *data)
{
struct sas_ha_struct *ha = device->port->ha;
- struct sas_internal *i = to_sas_internal(ha->core.shost->transportt);
+ struct sas_internal *i = to_sas_internal(ha->shost->transportt);
struct sas_task *task = NULL;
int res, retry;
@@ -1015,7 +979,7 @@ int sas_execute_tmf(struct domain_device *device, void *parameter,
{
struct sas_task *task;
struct sas_internal *i =
- to_sas_internal(device->port->ha->core.shost->transportt);
+ to_sas_internal(device->port->ha->shost->transportt);
int res, retry;
for (retry = 0; retry < TASK_RETRY; retry++) {
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 9a8963684369..af15f7a22d25 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -309,7 +309,6 @@ struct lpfc_hba;
#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
#define LPFC_MAX_VMID_SIZE 256
-#define LPFC_COMPRESS_VMID_SIZE 16
union lpfc_vmid_io_tag {
u32 app_id; /* App Id vmid */
@@ -667,7 +666,7 @@ struct lpfc_vport {
uint32_t cfg_first_burst_size;
uint32_t dev_loss_tmo_changed;
/* VMID parameters */
- u8 lpfc_vmid_host_uuid[LPFC_COMPRESS_VMID_SIZE];
+ u8 lpfc_vmid_host_uuid[16];
u32 max_vmid; /* maximum VMIDs allowed per port */
u32 cur_vmid_cnt; /* Current VMID count */
#define LPFC_MIN_VMID 4
@@ -872,6 +871,7 @@ enum lpfc_irq_chann_mode {
enum lpfc_hba_bit_flags {
FABRIC_COMANDS_BLOCKED,
HBA_PCI_ERR,
+ MBX_TMO_ERR,
};
struct lpfc_hba {
@@ -1709,6 +1709,25 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
return cpu_it;
}
/**
+ * lpfc_next_present_cpu - Finds next present CPU after n
+ * @n: the cpu prior to search
+ *
+ * Note: If no next present cpu, then fallback to first present cpu.
+ *
+ **/
+static inline unsigned int lpfc_next_present_cpu(int n)
+{
+ unsigned int cpu;
+
+ cpu = cpumask_next(n, cpu_present_mask);
+
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(cpu_present_mask);
+
+ return cpu;
+}
+
+/**
* lpfc_sli4_mod_hba_eq_delay - update EQ delay
* @phba: Pointer to HBA context object.
* @q: The Event Queue to update.
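
The new lpfc_next_present_cpu() helper added above searches cpu_present_mask after n and wraps to the first present CPU when it runs off the end. The sketch below reproduces that wrap-around search over a plain bitmask in userspace; it is an illustration of the algorithm, not the cpumask API.

#include <stdio.h>

#define NR_CPUS 8

static unsigned int next_present_cpu(unsigned int present_mask, int n)
{
	for (int cpu = n + 1; cpu < NR_CPUS; cpu++)
		if (present_mask & (1u << cpu))
			return cpu;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)	/* wrap to the first present */
		if (present_mask & (1u << cpu))
			return cpu;
	return 0;
}

int main(void)
{
	unsigned int mask = 0x2d;	/* CPUs 0, 2, 3, 5 present */

	printf("after 3 -> %u\n", next_present_cpu(mask, 3));	/* 5 */
	printf("after 5 -> %u\n", next_present_cpu(mask, 5));	/* wraps to 0 */
	return 0;
}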
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 21c7ecd3ede5..b1c9107d3408 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2127,11 +2127,12 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
uint32_t *mrpi, uint32_t *arpi,
uint32_t *mvpi, uint32_t *avpi)
{
- struct lpfc_mbx_read_config *rd_config;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
- uint32_t max_vpi;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u16 rsrc_ext_cnt, rsrc_ext_size, max_vpi;
/*
* prevent udev from issuing mailbox commands until the port is
@@ -2167,31 +2168,65 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
}
if (phba->sli_rev == LPFC_SLI_REV4) {
- rd_config = &pmboxq->u.mqe.un.rd_config;
- if (mrpi)
- *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
- if (arpi)
- *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
- phba->sli4_hba.max_cfg_param.rpi_used;
- if (mxri)
- *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
- if (axri)
- *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
- phba->sli4_hba.max_cfg_param.xri_used;
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+
+ /* Normally, extents are not used */
+ if (!phba->sli4_hba.extents_in_use) {
+ if (mrpi)
+ *mrpi = max_cfg_param->max_rpi;
+ if (mxri)
+ *mxri = max_cfg_param->max_xri;
+ if (mvpi) {
+ max_vpi = max_cfg_param->max_vpi;
+
+ /* Limit the max we support */
+ if (max_vpi > LPFC_MAX_VPI)
+ max_vpi = LPFC_MAX_VPI;
+ *mvpi = max_vpi;
+ }
+ } else { /* Extents in use */
+ if (mrpi) {
+ if (lpfc_sli4_get_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_RPI,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size)) {
+ rc = 0;
+ goto free_pmboxq;
+ }
+
+ *mrpi = rsrc_ext_cnt * rsrc_ext_size;
+ }
- /* Account for differences with SLI-3. Get vpi count from
- * mailbox data and subtract one for max vpi value.
- */
- max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
- (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+ if (mxri) {
+ if (lpfc_sli4_get_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_XRI,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size)) {
+ rc = 0;
+ goto free_pmboxq;
+ }
- /* Limit the max we support */
- if (max_vpi > LPFC_MAX_VPI)
- max_vpi = LPFC_MAX_VPI;
- if (mvpi)
- *mvpi = max_vpi;
- if (avpi)
- *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
+ *mxri = rsrc_ext_cnt * rsrc_ext_size;
+ }
+
+ if (mvpi) {
+ if (lpfc_sli4_get_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VPI,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size)) {
+ rc = 0;
+ goto free_pmboxq;
+ }
+
+ max_vpi = rsrc_ext_cnt * rsrc_ext_size;
+
+ /* Limit the max we support */
+ if (max_vpi > LPFC_MAX_VPI)
+ max_vpi = LPFC_MAX_VPI;
+ *mvpi = max_vpi;
+ }
+ }
} else {
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
@@ -2212,8 +2247,12 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
}
}
+ /* Success */
+ rc = 1;
+
+free_pmboxq:
mempool_free(pmboxq, phba->mbox_mem_pool);
- return 1;
+ return rc;
}
/**
@@ -2265,10 +2304,19 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- uint32_t cnt, acnt;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u32 cnt = 0, acnt = 0;
- if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
- return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ max_cfg_param->rpi_used);
+ } else {
+ if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ }
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
@@ -2321,10 +2369,19 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- uint32_t cnt, acnt;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u32 cnt = 0, acnt = 0;
- if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
- return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ max_cfg_param->xri_used);
+ } else {
+ if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ }
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
@@ -2377,10 +2434,19 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- uint32_t cnt, acnt;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u32 cnt = 0, acnt = 0;
- if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
- return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ max_cfg_param->vpi_used);
+ } else {
+ if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ }
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 474834f313a7..baae1f8279e0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1557,7 +1557,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_NODE,
"3064 Setting ndlp x%px, DID x%06x "
"with FC4 x%08x, Data: x%08x x%08x "
"%d\n",
@@ -1568,14 +1569,21 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
ndlp->nlp_fc4_type) {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
-
- lpfc_nlp_set_state(vport, ndlp,
- NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
+ /* This is a fabric topology so if discovery
+ * started with an unsolicited PLOGI, don't
+ * send a PRLI. Targets don't issue PLOGI or
+ * PRLI when acting as a target. Likely this is
+ * an initiator function.
+ */
+ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
+ }
} else if (!ndlp->nlp_fc4_type) {
/* If fc4 type is still unknown, then LOGO */
lpfc_printf_vlog(vport, KERN_INFO,
- LOG_DISCOVERY,
+ LOG_DISCOVERY | LOG_NODE,
"6443 Sending LOGO ndlp x%px,"
"DID x%06x with fc4_type: "
"x%08x, state: %d\n",
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 7f9b221e7c34..ea9b42225e62 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -6073,7 +6073,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba->hba_debugfs_root,
phba,
&lpfc_debugfs_op_multixripools);
- if (!phba->debug_multixri_pools) {
+ if (IS_ERR(phba->debug_multixri_pools)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0527 Cannot create debugfs multixripools\n");
goto debug_failed;
@@ -6085,7 +6085,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_cgn_buffer_op);
- if (!phba->debug_cgn_buffer) {
+ if (IS_ERR(phba->debug_cgn_buffer)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"6527 Cannot create debugfs "
"cgn_buffer\n");
@@ -6098,7 +6098,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_rx_monitor_op);
- if (!phba->debug_rx_monitor) {
+ if (IS_ERR(phba->debug_rx_monitor)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"6528 Cannot create debugfs "
"rx_monitor\n");
@@ -6111,7 +6111,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, 0644,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_ras_log);
- if (!phba->debug_ras_log) {
+ if (IS_ERR(phba->debug_ras_log)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"6148 Cannot create debugfs"
" ras_log\n");
@@ -6132,7 +6132,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_lockstat);
- if (!phba->debug_lockstat) {
+ if (IS_ERR(phba->debug_lockstat)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"4610 Can't create debugfs lockstat\n");
goto debug_failed;
@@ -6358,7 +6358,7 @@ nvmeio_off:
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_scsistat);
- if (!vport->debug_scsistat) {
+ if (IS_ERR(vport->debug_scsistat)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"4611 Cannot create debugfs scsistat\n");
goto debug_failed;
@@ -6369,7 +6369,7 @@ nvmeio_off:
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_ioktime);
- if (!vport->debug_ioktime) {
+ if (IS_ERR(vport->debug_ioktime)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0815 Cannot create debugfs ioktime\n");
goto debug_failed;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2bad9954c355..54e47f268235 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1041,7 +1041,7 @@ stop_rr_fcf_flogi:
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
lpfc_nlp_put(ndlp);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"0150 FLOGI failure Status:x%x/x%x "
"xri x%x TMO:x%x refcnt %d\n",
ulp_status, ulp_word4, cmdiocb->sli4_xritag,
@@ -1091,7 +1091,6 @@ stop_rr_fcf_flogi:
if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
lpfc_issue_reg_vfi(vport);
- lpfc_nlp_put(ndlp);
goto out;
}
goto flogifail;
@@ -1332,7 +1331,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->cfg_vmid_priority_tagging) {
sp->cmn.priority_tagging = 1;
/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
- if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
+ if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
+ sizeof(vport->lpfc_vmid_host_uuid))) {
memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
sizeof(phba->wwpn));
memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
@@ -2377,10 +2377,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PRLI failed */
lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
- "data: x%x x%x\n",
+ "data: x%x x%x x%x\n",
ndlp->nlp_DID, ulp_status,
ulp_word4, ndlp->nlp_state,
- ndlp->fc4_prli_sent);
+ ndlp->fc4_prli_sent, ndlp->nlp_flag);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
@@ -2391,14 +2391,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* mismatch typically caused by an RSCN. Skip any
* processing to allow recovery.
*/
- if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
- ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) {
+ if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) ||
+ (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+ ndlp->nlp_flag & NLP_DELAY_TMO)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "2784 PRLI cmpl: state mismatch "
+ "2784 PRLI cmpl: Allow Node recovery "
"DID x%06x nstate x%x nflag x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag);
- goto out;
+ goto out;
}
/*
@@ -6166,11 +6168,25 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
npr->TaskRetryIdReq = 1;
}
npr->acceptRspCode = PRLI_REQ_EXECUTED;
- npr->estabImagePair = 1;
+
+ /* Set image pair for complementary pairs only. */
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ npr->estabImagePair = 1;
+ else
+ npr->estabImagePair = 0;
npr->readXferRdyDis = 1;
npr->ConfmComplAllowed = 1;
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
+
+ /* Xmit PRLI ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "6014 FCP issue PRLI ACC imgpair %d "
+ "retry %d task %d\n",
+ npr->estabImagePair,
+ npr->Retry, npr->TaskRetryIdReq);
+
} else if (prli_fc4_req == PRLI_NVME_TYPE) {
/* Respond with an NVME PRLI Type */
npr_nvme = (struct lpfc_nvme_prli *) pcmd;
@@ -9588,11 +9604,13 @@ void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
LIST_HEAD(abort_list);
+ LIST_HEAD(cancel_list);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
u32 ulp_command;
unsigned long iflags = 0;
+ bool mbx_tmo_err;
lpfc_fabric_abort_vport(vport);
@@ -9614,15 +9632,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
+ mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->cmd_flag & LPFC_IO_LIBDFC)
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
continue;
if (piocb->vport != vport)
continue;
- if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
+ if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
continue;
/* On the ELS ring we can have ELS_REQUESTs or
@@ -9641,8 +9660,8 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
*/
if (phba->link_state == LPFC_LINK_DOWN)
piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
- }
- if (ulp_command == CMD_GEN_REQUEST64_CR)
+ } else if (ulp_command == CMD_GEN_REQUEST64_CR ||
+ mbx_tmo_err)
list_add_tail(&piocb->dlist, &abort_list);
}
@@ -9654,11 +9673,19 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
+ if (mbx_tmo_err)
+ list_move_tail(&piocb->list, &cancel_list);
+ else
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
+
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
- /* Make sure HBA is alive */
- lpfc_issue_hb_tmo(phba);
+ if (!list_empty(&cancel_list))
+ lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+ else
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -12331,9 +12358,10 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
elsiocb->vmid_tag.vmid_context = vmid_context;
pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
- if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
+ if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
+ sizeof(vport->lpfc_vmid_host_uuid)))
memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
- LPFC_COMPRESS_VMID_SIZE);
+ sizeof(vport->lpfc_vmid_host_uuid));
*((u32 *)(pcmd)) = ELS_CMD_UVEM;
len = (u32 *)(pcmd + 4);
@@ -12343,13 +12371,13 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
- LPFC_COMPRESS_VMID_SIZE);
+ sizeof(vem_id_desc->vem_id));
inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
memcpy(inst_desc->global_vem_id, vmid->host_vmid,
- LPFC_COMPRESS_VMID_SIZE);
+ sizeof(inst_desc->global_vem_id));
bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
bf_set(lpfc_instantiated_local_id, inst_desc,
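The lpfc_els.c changes above replace the uuid_is_null() casts with memchr_inv(buf, 0, sizeof(buf)), which returns NULL only when every byte equals the given value, so a NULL result means the host UUID is still unset. A small stand-alone sketch with the same contract as the kernel helper (memchr_inv() itself is in lib/string.c; the stand-in below is only for illustration):

#include <stdio.h>
#include <string.h>

/* Return NULL if every byte equals 'c', else a pointer to the first mismatch. */
static const void *memchr_inv_demo(const void *p, int c, size_t len)
{
	const unsigned char *s = p;
	size_t i;

	for (i = 0; i < len; i++)
		if (s[i] != (unsigned char)c)
			return s + i;
	return NULL;
}

int main(void)
{
	unsigned char host_uuid[16] = { 0 };

	if (!memchr_inv_demo(host_uuid, 0, sizeof(host_uuid)))
		puts("uuid not set yet - seed it from wwpn/wwnn");

	host_uuid[3] = 0xab;
	if (memchr_inv_demo(host_uuid, 0, sizeof(host_uuid)))
		puts("uuid already populated - leave it alone");
	return 0;
}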
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index fdd7f69d87ef..5154eeaee0ec 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -169,29 +169,45 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %d state %d xpt x%x\n",
+ "load_flag x%x refcnt %u state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
vport->load_flag, kref_read(&ndlp->kref),
ndlp->nlp_state, ndlp->fc4_xpt_flags);
- /* Don't schedule a worker thread event if the vport is going down.
- * The teardown process cleans up the node via lpfc_drop_node.
- */
+ /* Don't schedule a worker thread event if the vport is going down. */
if (vport->load_flag & FC_UNLOADING) {
- ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
- ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
- /* clear the NLP_XPT_REGD if the node is not registered
- * with nvme-fc
+ /* The scsi_transport is done with the rport so lpfc cannot
+ * call to unregister. Remove the scsi transport reference
+ * and clean up the SCSI transport node details.
*/
- if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
- ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+
+ /* NVME transport-registered rports need the
+ * NLP_XPT_REGD flag to complete an unregister.
+ */
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_put(ndlp);
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ }
- /* Remove the node reference from remote_port_add now.
- * The driver will not call remote_port_delete.
+ /* Only 1 thread can drop the initial node reference. If
+ * another thread has set NLP_DROPPED, this thread is done.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
+ !(ndlp->nlp_flag & NLP_DROPPED)) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
return;
}
@@ -4686,7 +4702,8 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
spin_unlock_irqrestore(&ndlp->lock, iflags);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
"0999 %s Not regd: ndlp x%px rport x%px DID "
"x%x FLG x%x XPT x%x\n",
__func__, ndlp, ndlp->rport, ndlp->nlp_DID,
@@ -4702,9 +4719,10 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
} else if (!ndlp->rport) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
"1999 %s NDLP in devloss x%px DID x%x FLG x%x"
- " XPT x%x refcnt %d\n",
+ " XPT x%x refcnt %u\n",
__func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->fc4_xpt_flags,
kref_read(&ndlp->kref));
@@ -4954,22 +4972,29 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
/*
* Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
- * be used if we wish to issue the "last" lpfc_nlp_put() to remove
- * the ndlp from the vport. The ndlp marked as UNUSED on the list
- * until ALL other outstanding threads have completed. We check
- * that the ndlp not already in the UNUSED state before we proceed.
+ * be used when lpfc wants to remove the "last" lpfc_nlp_put() to
+ * release the ndlp from the vport when conditions are correct.
*/
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
- ndlp->nlp_flag |= NLP_DROPPED;
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
}
- lpfc_nlp_put(ndlp);
- return;
+ /* NLP_DROPPED means another thread already removed the initial
+ * reference from lpfc_nlp_init. If set, don't drop it again and
+ * introduce an imbalance.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+ spin_unlock_irq(&ndlp->lock);
}
/*
@@ -5757,8 +5782,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
(NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
+ if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
+ ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ }
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index aaea3e31944d..2108b4cb7815 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -764,6 +764,8 @@ typedef struct _PRLI { /* Structure is in Big Endian format */
#define PRLI_PREDEF_CONFIG 0x5
#define PRLI_PARTIAL_SUCCESS 0x6
#define PRLI_INVALID_PAGE_CNT 0x7
+#define PRLI_INV_SRV_PARM 0x8
+
uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 3221a934066b..9e59c050103d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2123,7 +2123,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
en_rn_msg = false;
} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3144 Port Down: Debug Dump\n");
else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
@@ -7550,6 +7550,8 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba)
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
+ int rc = 0;
+
/* If resets are disabled then set error state and return. */
if (!phba->cfg_enable_hba_reset) {
phba->link_state = LPFC_HBA_ERROR;
@@ -7560,13 +7562,25 @@ lpfc_reset_hba(struct lpfc_hba *phba)
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
} else {
+ if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
+ /* Perform a PCI function reset to start from clean */
+ rc = lpfc_pci_function_reset(phba);
+ lpfc_els_flush_all_cmd(phba);
+ }
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_sli_flush_io_rings(phba);
}
lpfc_offline(phba);
- lpfc_sli_brdrestart(phba);
- lpfc_online(phba);
- lpfc_unblock_mgmt_io(phba);
+ clear_bit(MBX_TMO_ERR, &phba->bit_flags);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "8888 PCI function reset failed rc %x\n",
+ rc);
+ } else {
+ lpfc_sli_brdrestart(phba);
+ lpfc_online(phba);
+ lpfc_unblock_mgmt_io(phba);
+ }
}
/**
@@ -12498,10 +12512,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
(new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
(new_cpup->phys_id == cpup->phys_id))
goto found_same;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* At this point, we leave the CPU as unassigned */
continue;
@@ -12513,9 +12524,7 @@ found_same:
* chance of having multiple unassigned CPU entries
* selecting the same IRQ.
*/
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu >= nr_cpu_ids)
- start_cpu = first_cpu;
+ start_cpu = lpfc_next_present_cpu(new_cpu);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3337 Set Affinity: CPU %d "
@@ -12548,10 +12557,7 @@ found_same:
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
(new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
goto found_any;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* We should never leave an entry unassigned */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -12567,9 +12573,7 @@ found_any:
* chance of having multiple unassigned CPU entries
* selecting the same IRQ.
*/
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu >= nr_cpu_ids)
- start_cpu = first_cpu;
+ start_cpu = lpfc_next_present_cpu(new_cpu);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3338 Set Affinity: CPU %d "
@@ -12640,9 +12644,7 @@ found_any:
new_cpup->core_id == cpup->core_id) {
goto found_hdwq;
}
- new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* If we can't match both phys_id and core_id,
@@ -12654,10 +12656,7 @@ found_any:
if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
new_cpup->phys_id == cpup->phys_id)
goto found_hdwq;
-
- new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* Otherwise just round robin on cfg_hdw_queue */
@@ -12666,9 +12665,7 @@ found_any:
goto logit;
found_hdwq:
/* We found an available entry, copy the IRQ info */
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu >= nr_cpu_ids)
- start_cpu = first_cpu;
+ start_cpu = lpfc_next_present_cpu(new_cpu);
cpup->hdwq = new_cpup->hdwq;
logit:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
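The affinity hunks in lpfc_init.c collapse the repeated "advance to the next present CPU, wrapping to the first one" sequence into lpfc_next_present_cpu(). A minimal sketch of the wrap-around walk that helper presumably factors out, with a plain bool array standing in for cpu_present_mask (the real code uses cpumask_next()/cpumask_first()):

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

static const bool present[NR_CPUS] = { 1, 1, 0, 1, 0, 1, 1, 0 };

static int next_present_cpu(int cpu)
{
	int i;

	for (i = cpu + 1; i < NR_CPUS; i++)	/* cpumask_next() */
		if (present[i])
			return i;
	for (i = 0; i < NR_CPUS; i++)		/* wrap to the first present CPU */
		if (present[i])
			return i;
	return -1;
}

int main(void)
{
	int cpu = 0, n;

	for (n = 0; n < 6; n++) {
		cpu = next_present_cpu(cpu);
		printf("next present cpu: %d\n", cpu);
	}
	return 0;	/* prints 1 3 5 6 0 1 - wraps past the last CPU */
}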
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b86ff9fcdf0c..1eb7f7e60bba 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -879,23 +879,34 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
lpfc_retry_pport_discovery(phba);
}
- } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
- ((ndlp->nlp_type & NLP_FCP_TARGET) ||
- (ndlp->nlp_type & NLP_NVME_TARGET) ||
- (vport->fc_flag & FC_PT2PT))) ||
- (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
- /* Only try to re-login if this is NOT a Fabric Node
- * AND the remote NPORT is a FCP/NVME Target or we
- * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
- * case for LOGO as a response to ADISC behavior.
- */
- mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(&ndlp->lock);
-
- ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+ "3203 LOGO recover nport x%06x state x%x "
+ "ntype x%x fc_flag x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_type, vport->fc_flag);
+
+ /* Special cases for rports that recover post LOGO. */
+ if ((!(ndlp->nlp_type == NLP_FABRIC) &&
+ (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) ||
+ vport->fc_flag & FC_PT2PT)) ||
+ (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
+ ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(1000 * 1));
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(&ndlp->lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+ "3204 Start nlpdelay on DID x%06x "
+ "nflag x%x lastels x%x ref cnt %u",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_last_elscmd,
+ kref_read(&ndlp->kref));
+ }
}
out:
/* Unregister from backend, could have been skipped due to ADISC */
@@ -1854,7 +1865,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
- struct lpfc_nodelist *ns_ndlp;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1882,13 +1892,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
}
spin_unlock_irq(&phba->hbalock);
- /* software abort if any GID_FT is outstanding */
- if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
- ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ns_ndlp)
- lpfc_els_abort(phba, ns_ndlp);
- }
-
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -2148,6 +2151,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_nvme_prli *nvpr;
void *temp_ptr;
u32 ulp_status;
+ bool acc_imode_sps = false;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->rsp_iocb;
@@ -2182,22 +2186,32 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto out_err;
}
- if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
- (npr->prliType == PRLI_FCP_TYPE)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
- npr->initiatorFunc,
- npr->targetFunc);
- if (npr->initiatorFunc)
- ndlp->nlp_type |= NLP_FCP_INITIATOR;
- if (npr->targetFunc) {
- ndlp->nlp_type |= NLP_FCP_TARGET;
- if (npr->writeXferRdyDis)
- ndlp->nlp_flag |= NLP_FIRSTBURST;
+ if (npr && npr->prliType == PRLI_FCP_TYPE) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "6028 FCP NPR PRLI Cmpl Init %d Target %d "
+ "EIP %d AccCode x%x\n",
+ npr->initiatorFunc, npr->targetFunc,
+ npr->estabImagePair, npr->acceptRspCode);
+
+ if (npr->acceptRspCode == PRLI_INV_SRV_PARM) {
+ /* Strict initiators don't establish an image pair. */
+ if (npr->initiatorFunc && !npr->targetFunc &&
+ !npr->estabImagePair)
+ acc_imode_sps = true;
}
- if (npr->Retry)
- ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ if (npr->acceptRspCode == PRLI_REQ_EXECUTED || acc_imode_sps) {
+ if (npr->initiatorFunc)
+ ndlp->nlp_type |= NLP_FCP_INITIATOR;
+ if (npr->targetFunc) {
+ ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->writeXferRdyDis)
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ }
+ if (npr->Retry)
+ ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ }
} else if (nvpr &&
(bf_get_be32(prli_acc_rsp_code, nvpr) ==
PRLI_REQ_EXECUTED) &&
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 8db7cb99903d..96e11a26c297 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -228,8 +228,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
spin_unlock_irq(&ndlp->lock);
/* On a devloss timeout event, one more put is executed provided the
- * NVME and SCSI rport unregister requests are complete. If the vport
- * is unloading, this extra put is executed by lpfc_drop_node.
+ * NVME and SCSI rport unregister requests are complete.
*/
if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
@@ -1864,7 +1863,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
int ret_val;
- struct nvme_fc_cmd_iu *cp;
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1988,16 +1986,10 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
- /*
- * Get Command Id from cmd to plug into response. This
- * code is not needed in the next NVME Transport drop.
- */
- cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
- "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
- nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
- cp->sqe.common.command_id);
+ "ox_id x%x\n",
+ nvmereq_wqe->sli4_xritag);
return;
out_unlock:
@@ -2510,8 +2502,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"6031 RemotePort Registration failed "
- "err: %d, DID x%06x\n",
- ret, ndlp->nlp_DID);
+ "err: %d, DID x%06x ref %u\n",
+ ret, ndlp->nlp_DID, kref_read(&ndlp->kref));
+ lpfc_nlp_put(ndlp);
}
return ret;
@@ -2573,11 +2566,7 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* nvme_transport perspective. Loss of an rport just means IO cannot
* be sent and recovery is completely up to the initator.
* For now, the driver just unbinds the DID and port_role so that
- * no further IO can be issued. Changes are planned for later.
- *
- * Notes - the ndlp reference count is not decremented here since
- * since there is no nvme_transport api for devloss. Node ref count
- * is only adjusted in driver unload.
+ * no further IO can be issued.
*/
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
@@ -2652,6 +2641,21 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"6167 NVME unregister failed %d "
"port_state x%x\n",
ret, remoteport->port_state);
+
+ if (vport->load_flag & FC_UNLOADING) {
+ /* Only 1 thread can drop the initial node
+ * reference. Check if another thread has set
+ * NLP_DROPPED.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+ spin_unlock_irq(&ndlp->lock);
+ }
}
}
return;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index dff4584d338b..425328d9c2d8 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1620,10 +1620,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
cpu = cpumask_first(cpu_present_mask);
continue;
}
- cpu = cpumask_next(cpu, cpu_present_mask);
- if (cpu == nr_cpu_ids)
- cpu = cpumask_first(cpu_present_mask);
-
+ cpu = lpfc_next_present_cpu(cpu);
}
for_each_present_cpu(i) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 58d10f8f75a7..4dfadf254a72 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3935,6 +3935,8 @@ void lpfc_poll_eratt(struct timer_list *t)
uint64_t sli_intr, cnt;
phba = from_timer(phba, t, eratt_poll);
+ if (!(phba->hba_flag & HBA_SETUP))
+ return;
/* Here we will also keep track of interrupts per sec of the hba */
sli_intr = phba->sli.slistat.sli_intr;
@@ -7693,7 +7695,9 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
spin_unlock_irq(&phba->hbalock);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "3161 Failure to post sgl to port.\n");
+ "3161 Failure to post sgl to port,status %x "
+ "blkcnt %d totalcnt %d postcnt %d\n",
+ status, block_cnt, total_cnt, post_cnt);
return -EIO;
}
@@ -8478,6 +8482,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
}
}
+ phba->hba_flag &= ~HBA_SETUP;
lpfc_sli4_dip(phba);
@@ -9282,6 +9287,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
* would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
* it to fail all outstanding SCSI IO.
*/
+ set_bit(MBX_TMO_ERR, &phba->bit_flags);
spin_lock_irq(&phba->pport->work_port_lock);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irq(&phba->pport->work_port_lock);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6f35491aed0f..13a547277f97 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.13"
+#define LPFC_DRIVER_VERSION "14.2.0.14"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index ef2b6380e19a..bc867da650b6 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -438,7 +438,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// set up PCI related soft state and other pre-known parameters
- adapter->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ adapter->unique_id = pci_dev_id(pdev);
adapter->irq = pdev->irq;
adapter->pdev = pdev;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3554f6b07727..94abba57582d 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2332,7 +2332,7 @@ struct megasas_instance {
u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
bool use_seqnum_jbod_fp; /* Added for PD sequence */
bool smp_affinity_enable;
- spinlock_t crashdump_lock;
+ struct mutex crashdump_lock;
struct megasas_register_set __iomem *reg_set;
u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 050eed8e2684..e1aa667dae66 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3271,14 +3271,13 @@ fw_crash_buffer_store(struct device *cdev,
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
int val = 0;
- unsigned long flags;
if (kstrtoint(buf, 0, &val) != 0)
return -EINVAL;
- spin_lock_irqsave(&instance->crashdump_lock, flags);
+ mutex_lock(&instance->crashdump_lock);
instance->fw_crash_buffer_offset = val;
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return strlen(buf);
}
@@ -3293,24 +3292,23 @@ fw_crash_buffer_show(struct device *cdev,
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
unsigned long chunk_left_bytes;
unsigned long src_addr;
- unsigned long flags;
u32 buff_offset;
- spin_lock_irqsave(&instance->crashdump_lock, flags);
+ mutex_lock(&instance->crashdump_lock);
buff_offset = instance->fw_crash_buffer_offset;
if (!instance->crash_dump_buf ||
!((instance->fw_crash_state == AVAILABLE) ||
(instance->fw_crash_state == COPYING))) {
dev_err(&instance->pdev->dev,
"Firmware crash dump is not available\n");
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return -EINVAL;
}
if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
dev_err(&instance->pdev->dev,
"Firmware crash dump offset is out of range\n");
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return 0;
}
@@ -3322,7 +3320,7 @@ fw_crash_buffer_show(struct device *cdev,
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
(buff_offset % dmachunk);
memcpy(buf, (void *)src_addr, size);
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
return size;
}
@@ -3347,7 +3345,6 @@ fw_crash_state_store(struct device *cdev,
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
int val = 0;
- unsigned long flags;
if (kstrtoint(buf, 0, &val) != 0)
return -EINVAL;
@@ -3361,9 +3358,9 @@ fw_crash_state_store(struct device *cdev,
instance->fw_crash_state = val;
if ((val == COPIED) || (val == COPY_ERROR)) {
- spin_lock_irqsave(&instance->crashdump_lock, flags);
+ mutex_lock(&instance->crashdump_lock);
megasas_free_host_crash_buffer(instance);
- spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ mutex_unlock(&instance->crashdump_lock);
if (val == COPY_ERROR)
dev_info(&instance->pdev->dev, "application failed to "
"copy Firmware crash dump\n");
@@ -7422,7 +7419,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
- spin_lock_init(&instance->crashdump_lock);
+ mutex_init(&instance->crashdump_lock);
spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->stream_lock);
@@ -7518,7 +7515,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
*/
instance->pdev = pdev;
instance->host = host;
- instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->unique_id = pci_dev_id(pdev);
instance->init_id = MEGASAS_DEFAULT_INIT_ID;
megasas_set_adapter_type(instance);
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 2fc196499c89..35f81af40f51 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -1482,7 +1482,7 @@ struct mpi3_security_page0 {
#define MPI3_SECURITY1_KEY_RECORD_MAX 1
#endif
#ifndef MPI3_SECURITY1_PAD_MAX
-#define MPI3_SECURITY1_PAD_MAX 1
+#define MPI3_SECURITY1_PAD_MAX 4
#endif
union mpi3_security1_key_data {
__le32 dword[128];
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index f5e9c2309ce6..1e4a60fc655f 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -600,6 +600,7 @@ struct mpi3_event_data_pcie_error_threshold {
__le16 threshold_count;
__le16 attached_dev_handle;
__le16 reserved12;
+ __le32 reserved14;
};
#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 441cfc2c7f09..1e0a3dcaf723 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (27)
+#define MPI3_VERSION_UNIT (28)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0afb687402e1..ae98d15c30b1 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -55,8 +55,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.4.1.0.0"
-#define MPI3MR_DRIVER_RELDATE "16-March-2023"
+#define MPI3MR_DRIVER_VERSION "8.5.0.0.0"
+#define MPI3MR_DRIVER_RELDATE "24-July-2023"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -66,11 +66,12 @@ extern atomic64_t event_counter;
#define MPI3MR_NAME_LENGTH 32
#define IOCNAME "%s: "
-#define MPI3MR_MAX_SECTORS 2048
+#define MPI3MR_DEFAULT_MAX_IO_SIZE (1 * 1024 * 1024)
/* Definitions for internal SGL and Chain SGL buffers */
#define MPI3MR_PAGE_SIZE_4K 4096
-#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
+#define MPI3MR_DEFAULT_SGL_ENTRIES 256
+#define MPI3MR_MAX_SGL_ENTRIES 2048
/* Definitions for MAX values for shost */
#define MPI3MR_MAX_CMDS_LUN 128
@@ -206,6 +207,9 @@ extern atomic64_t event_counter;
*/
#define MPI3MR_MAX_APP_XFER_SECTORS (2048 + 512)
+#define MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS 256
+#define MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS 2048
+
/**
* struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
* Encapsulated commands.
@@ -323,6 +327,7 @@ struct mpi3mr_ioc_facts {
u16 max_perids;
u16 max_pds;
u16 max_sasexpanders;
+ u32 max_data_length;
u16 max_sasinitiators;
u16 max_enclosures;
u16 max_pcie_switches;
@@ -676,6 +681,7 @@ enum mpi3mr_dev_state {
* @io_unit_port: IO Unit port ID
* @non_stl: Is this device not to be attached with SAS TL
* @io_throttle_enabled: I/O throttling needed or not
+ * @wslen: Write same max length
* @q_depth: Device specific Queue Depth
* @wwid: World wide ID
* @enclosure_logical_id: Enclosure logical identifier
@@ -698,6 +704,7 @@ struct mpi3mr_tgt_dev {
u8 io_unit_port;
u8 non_stl;
u8 io_throttle_enabled;
+ u16 wslen;
u16 q_depth;
u64 wwid;
u64 enclosure_logical_id;
@@ -751,6 +758,8 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removed: Device removed in the Firmware
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
+ * @dev_nvme_dif: Device is NVMe DIF enabled
+ * @wslen: Write same max length
* @io_throttle_enabled: I/O throttling needed or not
* @io_divert: Flag indicates io divert is on or off for the dev
* @throttle_group: Pointer to throttle group info
@@ -767,6 +776,8 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removed;
u8 dev_removedelay;
u8 dev_type;
+ u8 dev_nvme_dif;
+ u16 wslen;
u8 io_throttle_enabled;
u8 io_divert;
struct mpi3mr_throttle_group_info *throttle_group;
@@ -782,12 +793,14 @@ struct mpi3mr_stgt_priv_data {
* @ncq_prio_enable: NCQ priority enable for SATA device
* @pend_count: Counter to track pending I/Os during error
* handling
+ * @wslen: Write same max length
*/
struct mpi3mr_sdev_priv_data {
struct mpi3mr_stgt_priv_data *tgt_priv_data;
u32 lun_id;
u8 ncq_prio_enable;
u32 pend_count;
+ u16 wslen;
};
/**
@@ -959,6 +972,7 @@ struct scmd_priv {
* @stop_drv_processing: Stop all command processing
* @device_refresh_on: Don't process the events until devices are refreshed
* @max_host_ios: Maximum host I/O count
+ * @max_sgl_entries: Max SGL entries per I/O
* @chain_buf_count: Chain buffer count
* @chain_buf_pool: Chain buffer pool
* @chain_sgl_list: Chain SGL list
@@ -1129,6 +1143,7 @@ struct mpi3mr_ioc {
u16 max_host_ios;
spinlock_t tgtdev_lock;
struct list_head tgtdev_list;
+ u16 max_sgl_entries;
u32 chain_buf_count;
struct dma_pool *chain_buf_pool;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 5fa07d6ee5b8..f039f1d98647 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -1163,6 +1163,12 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
return -EPERM;
}
+ if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
+ ioc_err(mrioc, "Warning: The maximum data transfer length\n"
+ "\tchanged after reset: previous(%d), new(%d),\n"
+ "the driver cannot change this at run time\n",
+ mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
+
if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
ioc_err(mrioc,
@@ -2343,8 +2349,8 @@ static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
mrioc->init_cmds.is_waiting = 0;
if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT);
retval = -1;
goto out_unlock;
}
@@ -2856,6 +2862,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
le16_to_cpu(facts_data->max_pcie_switches);
mrioc->facts.max_sasexpanders =
le16_to_cpu(facts_data->max_sas_expanders);
+ mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
mrioc->facts.max_sasinitiators =
le16_to_cpu(facts_data->max_sas_initiators);
mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
@@ -2893,13 +2900,18 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.io_throttle_high =
le16_to_cpu(facts_data->io_throttle_high);
+ if (mrioc->facts.max_data_length ==
+ MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
+ mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
+ else
+ mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
/* Store in 512b block count */
if (mrioc->facts.io_throttle_data_length)
mrioc->io_throttle_data_length =
(mrioc->facts.io_throttle_data_length * 2 * 4);
else
/* set the length to 1MB + 1K to disable throttle */
- mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
+ mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
@@ -2914,9 +2926,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
mrioc->facts.sge_mod_shift);
- ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
+ ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
mrioc->facts.dma_mask, (facts_flags &
- MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+ MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
ioc_info(mrioc,
"max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
@@ -3359,8 +3371,8 @@ int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
retval = -1;
goto out_unlock;
}
@@ -3414,7 +3426,14 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->chain_sgl_list)
goto out_failed;
- sz = MPI3MR_PAGE_SIZE_4K;
+ if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
+ MPI3MR_PAGE_SIZE_4K))
+ mrioc->max_sgl_entries = mrioc->facts.max_data_length /
+ MPI3MR_PAGE_SIZE_4K;
+ sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
+ ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
+ mrioc->max_sgl_entries, sz/1024);
+
mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
&mrioc->pdev->dev, sz, 16, 0);
if (!mrioc->chain_buf_pool) {
@@ -3813,7 +3832,7 @@ retry_init:
}
mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
-
+ mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
atomic_set(&mrioc->pend_large_data_sz, 0);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index d627355303d7..89ba015c5d7e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -33,6 +33,12 @@ static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
" bits for enabling additional logging info (default=0)");
+static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
+module_param(max_sgl_entries, int, 0444);
+MODULE_PARM_DESC(max_sgl_entries,
+ "Preferred max number of SG entries to be used for a single I/O\n"
+ "The actual value will be determined by the driver\n"
+ "(Minimum=256, Maximum=2048, default=256)");
/* Forward declarations*/
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
@@ -424,6 +430,7 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
tgt_priv->io_throttle_enabled = 0;
tgt_priv->io_divert = 0;
tgt_priv->throttle_group = NULL;
+ tgt_priv->wslen = 0;
if (tgtdev->host_exposed)
atomic_set(&tgt_priv->block_io, 1);
}
@@ -1034,6 +1041,19 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ dprint_reset(mrioc, "refresh target devices: check for removals\n");
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
+ tgtdev->host_exposed && tgtdev->starget &&
+ tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_removed = 1;
+ atomic_set(&tgt_priv->block_io, 0);
+ }
+ }
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
@@ -1102,6 +1122,18 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->io_throttle_enabled =
(flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+ switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
+ tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
+ break;
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
+ tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
+ break;
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
+ default:
+ tgtdev->wslen = 0;
+ break;
+ }
if (tgtdev->starget && tgtdev->starget->hostdata) {
scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
@@ -1113,6 +1145,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->io_throttle_enabled;
if (is_added == true)
atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ scsi_tgt_priv_data->wslen = tgtdev->wslen;
}
switch (dev_pg0->access_status) {
@@ -3413,7 +3446,7 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
scsi_bufflen(scmd));
return -ENOMEM;
}
- if (sges_left > MPI3MR_SG_DEPTH) {
+ if (sges_left > mrioc->max_sgl_entries) {
sdev_printk(KERN_ERR, scmd->device,
"scsi_dma_map returned unsupported sge count %d!\n",
sges_left);
@@ -3934,6 +3967,48 @@ void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
}
/**
+ * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
+ * @wslen: write same max length
+ *
+ * Gets values of unmap, ndob and number of blocks from write
+ * same scsi io and based on these values it sets divert IO flag
+ * and reason for diverting IO to firmware.
+ *
+ * Return: Nothing
+ */
+static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
+ u32 *scsiio_flags, u16 wslen)
+{
+ u8 unmap = 0, ndob = 0;
+ u8 opcode = scmd->cmnd[0];
+ u32 num_blocks = 0;
+ u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
+
+ if (opcode == WRITE_SAME_16) {
+ unmap = scmd->cmnd[1] & 0x08;
+ ndob = scmd->cmnd[1] & 0x01;
+ num_blocks = get_unaligned_be32(scmd->cmnd + 10);
+ } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
+ unmap = scmd->cmnd[10] & 0x08;
+ ndob = scmd->cmnd[10] & 0x01;
+ num_blocks = get_unaligned_be32(scmd->cmnd + 28);
+ } else
+ return;
+
+ if ((unmap) && (ndob) && (num_blocks > wslen)) {
+ scsiio_req->msg_flags |=
+ MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ *scsiio_flags |=
+ MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
+ }
+}
+
+/**
* mpi3mr_eh_host_reset - Host reset error handling callback
* @scmd: SCSI command reference
*
@@ -4430,7 +4505,6 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
unsigned long flags;
int retval = 0;
struct sas_rphy *rphy = NULL;
- bool update_stgt_priv_data = false;
scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
if (!scsi_tgt_priv_data)
@@ -4439,39 +4513,50 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
starget->hostdata = scsi_tgt_priv_data;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
-
if (starget->channel == mrioc->scsi_device_channel) {
tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
- if (tgt_dev && !tgt_dev->is_hidden)
- update_stgt_priv_data = true;
- else
+ if (tgt_dev && !tgt_dev->is_hidden) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
+ ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
+ ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
+ scsi_tgt_priv_data->dev_nvme_dif = 1;
+ scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
+ scsi_tgt_priv_data->wslen = tgt_dev->wslen;
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
+ } else
retval = -ENXIO;
} else if (mrioc->sas_transport_enabled && !starget->channel) {
rphy = dev_to_rphy(starget->dev.parent);
tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
rphy->identify.sas_address, rphy);
if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
- (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
- update_stgt_priv_data = true;
- else
+ (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
+ scsi_tgt_priv_data->wslen = tgt_dev->wslen;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ } else
retval = -ENXIO;
}
-
- if (update_stgt_priv_data) {
- scsi_tgt_priv_data->starget = starget;
- scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
- scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
- scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
- scsi_tgt_priv_data->tgt_dev = tgt_dev;
- tgt_dev->starget = starget;
- atomic_set(&scsi_tgt_priv_data->block_io, 0);
- retval = 0;
- scsi_tgt_priv_data->io_throttle_enabled =
- tgt_dev->io_throttle_enabled;
- if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
- scsi_tgt_priv_data->throttle_group =
- tgt_dev->dev_spec.vd_inf.tg;
- }
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
return retval;
@@ -4732,6 +4817,10 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
+ if (stgt_priv_data->wslen)
+ mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags,
+ stgt_priv_data->wslen);
+
memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
scsiio_req->dev_handle = cpu_to_le16(dev_handle);
@@ -4818,10 +4907,10 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.no_write_same = 1,
.can_queue = 1,
.this_id = -1,
- .sg_tablesize = MPI3MR_SG_DEPTH,
+ .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES,
/* max xfer supported is 1M (2K in 512 byte sized sectors)
*/
- .max_sectors = 2048,
+ .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
.max_segment_size = 0xffffffff,
.track_queue_depth = 1,
@@ -5004,6 +5093,16 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mrioc->pdev = pdev;
mrioc->stop_bsgs = 1;
+ mrioc->max_sgl_entries = max_sgl_entries;
+ if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
+ mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
+ else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
+ mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
+ else {
+ mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
+ mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
+ }
+
/* init shost parameters */
shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
shost->max_lun = -1;
@@ -5068,7 +5167,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->nr_maps = 3;
shost->can_queue = mrioc->max_host_ios;
- shost->sg_tablesize = MPI3MR_SG_DEPTH;
+ shost->sg_tablesize = mrioc->max_sgl_entries;
shost->max_id = mrioc->facts.max_perids + 1;
retval = scsi_add_host(shost, &pdev->dev);
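The new max_sgl_entries module parameter is normalized in mpi3mr_probe(): values are clamped to [256, 2048] and anything in between is rounded down to a multiple of 256 (the default). A short sketch of that normalization with a few example inputs; the constants are copied from the patch, the helper name is illustrative:

#include <stdio.h>

#define DEFAULT_SGL_ENTRIES 256
#define MAX_SGL_ENTRIES     2048

static int normalize_sgl_entries(int requested)
{
	if (requested > MAX_SGL_ENTRIES)
		return MAX_SGL_ENTRIES;
	if (requested < DEFAULT_SGL_ENTRIES)
		return DEFAULT_SGL_ENTRIES;
	return requested / DEFAULT_SGL_ENTRIES * DEFAULT_SGL_ENTRIES;
}

int main(void)
{
	int samples[] = { 100, 256, 700, 1024, 5000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("requested %4d -> used %4d\n",
		       samples[i], normalize_sgl_entries(samples[i]));
	return 0;	/* 100->256, 256->256, 700->512, 1024->1024, 5000->2048 */
}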
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index ed3923f8db4f..6de35b32223c 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -199,7 +199,7 @@
*
*****************************************************************************/
-typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
+typedef struct _MPI2_SYSTEM_INTERFACE_REGS {
U32 Doorbell; /*0x00 */
U32 WriteSequence; /*0x04 */
U32 HostDiagnostic; /*0x08 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 53f5492579cb..61a32bf00747 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -138,6 +138,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
+static u32
+_base_readl_ext_retry(const void __iomem *addr);
+
/**
* mpt3sas_base_check_cmd_timeout - Function
* to check timeout and command termination due
@@ -201,7 +204,7 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
* while reading the system interface register.
*/
static inline u32
-_base_readl_aero(const volatile void __iomem *addr)
+_base_readl_aero(const void __iomem *addr)
{
u32 i = 0, ret_val;
@@ -213,8 +216,22 @@ _base_readl_aero(const volatile void __iomem *addr)
return ret_val;
}
+static u32
+_base_readl_ext_retry(const void __iomem *addr)
+{
+ u32 i, ret_val;
+
+ for (i = 0 ; i < 30 ; i++) {
+ ret_val = readl(addr);
+ if (ret_val == 0)
+ continue;
+ }
+
+ return ret_val;
+}
+
static inline u32
-_base_readl(const volatile void __iomem *addr)
+_base_readl(const void __iomem *addr)
{
return readl(addr);
}
@@ -940,7 +957,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
dump_stack();
- doorbell = ioc->base_readl(&ioc->chip->Doorbell);
+ doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt3sas_print_fault_code(ioc, doorbell &
MPI2_DOORBELL_DATA_MASK);
@@ -6686,7 +6703,7 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
u32 s, sc;
- s = ioc->base_readl(&ioc->chip->Doorbell);
+ s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
sc = s & MPI2_IOC_STATE_MASK;
return cooked ? sc : s;
}
@@ -6831,7 +6848,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
__func__, count, timeout));
return 0;
} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- doorbell = ioc->base_readl(&ioc->chip->Doorbell);
+ doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
if ((doorbell & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_FAULT) {
mpt3sas_print_fault_code(ioc, doorbell);
@@ -6871,7 +6888,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
count = 0;
cntdn = 1000 * timeout;
do {
- doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
+ doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
dhsprintk(ioc,
ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -7019,7 +7036,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
__le32 *mfp;
/* make sure doorbell is not in use */
- if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
return -EFAULT;
}
@@ -7068,7 +7085,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
- reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -7076,7 +7093,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
__LINE__);
return -EFAULT;
}
- reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
@@ -7087,10 +7104,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
return -EFAULT;
}
if (i >= reply_bytes/2) /* overflow case */
- ioc->base_readl(&ioc->chip->Doorbell);
+ ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
else
reply[i] = le16_to_cpu(
- ioc->base_readl(&ioc->chip->Doorbell)
+ ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
@@ -7949,7 +7966,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
goto out;
}
- host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
drsprintk(ioc,
ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
count, host_diagnostic));
@@ -7969,7 +7986,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
for (count = 0; count < (300000000 /
MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
- host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+ host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
if (host_diagnostic == 0xFFFFFFFF) {
ioc_info(ioc,
@@ -8359,10 +8376,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->rdpq_array_enable_assigned = 0;
ioc->use_32bit_dma = false;
ioc->dma_mask = 64;
- if (ioc->is_aero_ioc)
+ if (ioc->is_aero_ioc) {
ioc->base_readl = &_base_readl_aero;
- else
+ ioc->base_readl_ext_retry = &_base_readl_ext_retry;
+ } else {
ioc->base_readl = &_base_readl;
+ ioc->base_readl_ext_retry = &_base_readl;
+ }
r = mpt3sas_base_map_resources(ioc);
if (r)
goto out_free_resources;
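The mpt3sas hunks above route every Doorbell and HostDiagnostic read on Aero/Sea controllers through a new base_readl_ext_retry callback, but the helper's body sits outside this excerpt. A minimal sketch of what such a retrying read could look like, assuming the convention that a transient all-zero readback is the condition being retried around; the helper name and retry count are illustrative, not the driver's:

/* Sketch only: retry count and zero-readback heuristic are assumptions. */
static u32
_base_readl_ext_retry_sketch(const void __iomem *addr)
{
	u32 i, ret_val;

	for (i = 0; i < 30; i++) {
		ret_val = readl(addr);
		if (ret_val != 0)
			break;
	}

	return ret_val;
}

On non-Aero controllers the hunk assigns the plain readl()-based callback to both pointers, so the retry cost is only paid where the hardware needs it.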
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 05364aa15ecd..1be0850ca17a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -994,7 +994,7 @@ typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 funcdep);
typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
-typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr);
+typedef u32 (*BASE_READ_REG) (const void __iomem *addr);
/*
* To get high iops reply queue's msix index when high iops mode is enabled
* else get the msix index of general reply queues.
@@ -1618,6 +1618,7 @@ struct MPT3SAS_ADAPTER {
u8 diag_trigger_active;
u8 atomic_desc_capable;
BASE_READ_REG base_readl;
+ BASE_READ_REG base_readl_ext_retry;
struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 49e2a5e7ce54..43ebb331e216 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -84,10 +84,8 @@ static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
phy->port = NULL;
timer_setup(&phy->timer, NULL, 0);
sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -416,7 +414,7 @@ static int mvs_prep_sas_ha_init(struct Scsi_Host *shost,
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
+ sha->shost = shost;
sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
if (!sha->lldd_ha)
@@ -458,7 +456,6 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
sha->sas_ha_name = DRV_NAME;
sha->dev = mvi->dev;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &mvi->sas_addr[0];
sha->num_phys = nr_core * chip_info->n_phy;
@@ -473,7 +470,7 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
shost->can_queue = can_queue;
mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
- sha->core.shost = mvi->shost;
+ sha->shost = mvi->shost;
}
static void mvs_init_sas_add(struct mvs_info *mvi)
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 9978c424214c..1444b1f1c4c8 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -564,7 +564,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
void *buf_prd;
struct ssp_frame_hdr *ssp_hdr;
void *buf_tmp;
- u8 *buf_cmd, *buf_oaf, fburst = 0;
+ u8 *buf_cmd, *buf_oaf;
dma_addr_t buf_tmp_dma;
u32 flags;
u32 resp_len, req_len, i, tag = tei->tag;
@@ -582,10 +582,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
(phy_mask << TXQ_PHY_SHIFT));
flags = MCH_RETRY;
- if (task->ssp_task.enable_first_burst) {
- flags |= MCH_FBURST;
- fburst = (1 << 7);
- }
if (is_tmf)
flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
else
@@ -667,8 +663,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (ssp_hdr->frame_type != SSP_TASK) {
- buf_cmd[9] = fburst | task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
+ buf_cmd[9] = task->ssp_task.task_attr;
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
} else{
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 73aa7059b556..d9d366ec17dc 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1500,7 +1500,7 @@ static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
if (sdev) {
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
}
}
@@ -2490,7 +2490,7 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mhba->pdev = pdev;
mhba->shost = host;
- mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ mhba->unique_id = pci_dev_id(pdev);
ret = mvumi_init_fw(mhba);
if (ret)
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 73cd25f30ca5..90069c7b1642 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4053,9 +4053,6 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
- if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
- ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
@@ -4095,7 +4092,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr;
u32 ATAP = 0x0;
- u32 dir;
+ u32 dir, retfis = 0;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
@@ -4124,8 +4121,11 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.tag = cpu_to_le32(tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
- sata_cmd.ncqtag_atap_dir_m =
- cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir);
+ if (task->ata_task.return_fis_on_success)
+ retfis = 1;
+ sata_cmd.retfis_ncqtag_atap_dir_m =
+ cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) |
+ ((ATAP & 0x3f) << 10) | dir);
sata_cmd.sata_fis = task->ata_task.fis;
if (likely(!task->ata_task.device_control_reg_update))
sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
@@ -4180,7 +4180,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 961d0465b923..fc2127dcb58d 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -515,7 +515,7 @@ struct sata_start_req {
__le32 tag;
__le32 device_id;
__le32 data_len;
- __le32 ncqtag_atap_dir_m;
+ __le32 retfis_ncqtag_atap_dir_m;
struct host_to_dev_fis sata_fis;
u32 reserved1;
u32 reserved2;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 4995e1ef4e0e..443a3176c6c0 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -162,10 +162,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -275,7 +273,6 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
return ret;
}
-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
/**
@@ -296,13 +293,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
pm8001_ha->chip->n_phy);
- /* Setup Interrupt */
- rc = pm8001_setup_irq(pm8001_ha);
- if (rc) {
- pm8001_dbg(pm8001_ha, FAIL,
- "pm8001_setup_irq failed [ret: %d]\n", rc);
- goto err_out;
- }
/* Request Interrupt */
rc = pm8001_request_irq(pm8001_ha);
if (rc)
@@ -654,10 +644,9 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
sha->sas_ha_name = DRV_NAME;
sha->dev = pm8001_ha->dev;
sha->strict_wide_ports = 1;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &pm8001_ha->sas_addr[0];
sha->num_phys = chip_info->n_phy;
- sha->core.shost = shost;
+ sha->shost = shost;
}
/**
@@ -1034,47 +1023,38 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
}
#endif
-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
-{
- struct pci_dev *pdev;
-
- pdev = pm8001_ha->pdev;
-
-#ifdef PM8001_USE_MSIX
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
- return pm8001_setup_msix(pm8001_ha);
- pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
-#endif
- return 0;
-}
-
/**
* pm8001_request_irq - register interrupt
* @pm8001_ha: our ha struct.
*/
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = pm8001_ha->pdev;
+#ifdef PM8001_USE_MSIX
int rc;
- pdev = pm8001_ha->pdev;
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+ rc = pm8001_setup_msix(pm8001_ha);
+ if (rc) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "pm8001_setup_irq failed [ret: %d]\n", rc);
+ return rc;
+ }
-#ifdef PM8001_USE_MSIX
- if (pdev->msix_cap && pci_msi_enabled())
- return pm8001_request_msix(pm8001_ha);
- else {
- pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
- goto intx;
+ if (pdev->msix_cap && pci_msi_enabled())
+ return pm8001_request_msix(pm8001_ha);
}
+
+ pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
#endif
-intx:
/* initialize the INT-X interrupt */
pm8001_ha->irq_vector[0].irq_id = 0;
pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
- rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
- pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
- return rc;
+
+ return request_irq(pdev->irq, pm8001_interrupt_handler_intx,
+ IRQF_SHARED, pm8001_ha->name,
+ SHOST_TO_SAS_HA(pm8001_ha->shost));
}
/**
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 953572fc0d9e..2fadd353f1c1 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -702,8 +702,6 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb);
int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
-struct sas_task *pm8001_alloc_task(void);
-void pm8001_free_task(struct sas_task *task);
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
u32 device_id);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 39a12ee94a72..3afd9443c425 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3671,10 +3671,12 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
(struct set_ctrl_cfg_resp *)(piomb + 4);
u32 status = le32_to_cpu(pPayload->status);
u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+ u32 tag = le32_to_cpu(pPayload->tag);
pm8001_dbg(pm8001_ha, MSG,
"SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
status, err_qlfr_pgcd);
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
@@ -4316,9 +4318,6 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
- if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr = 0x80;
- ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
@@ -4457,7 +4456,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0;
- u32 dir;
+ u32 dir, retfis = 0;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
@@ -4487,7 +4486,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.tag = cpu_to_le32(tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
-
+ if (task->ata_task.return_fis_on_success)
+ retfis = 1;
sata_cmd.sata_fis = task->ata_task.fis;
if (likely(!task->ata_task.device_control_reg_update))
sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
@@ -4500,12 +4500,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
"Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
sata_cmd.sata_fis.command);
opc = OPC_INB_SATA_DIF_ENC_IO;
-
- /* set encryption bit */
- sata_cmd.ncqtag_atap_dir_m_dad =
- cpu_to_le32(((ncg_tag & 0xff)<<16)|
- ((ATAP & 0x3f) << 10) | 0x20 | dir);
- /* dad (bit 0-1) is 0 */
+ /* set encryption bit; dad (bits 0-1) is 0 */
+ sata_cmd.retfis_ncqtag_atap_dir_m_dad =
+ cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) |
+ ((ATAP & 0x3f) << 10) | 0x20 | dir);
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
@@ -4568,11 +4566,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, IO,
"Sending Normal SATA command 0x%x inb %x\n",
sata_cmd.sata_fis.command, q_index);
- /* dad (bit 0-1) is 0 */
- sata_cmd.ncqtag_atap_dir_m_dad =
- cpu_to_le32(((ncg_tag & 0xff)<<16) |
- ((ATAP & 0x3f) << 10) | dir);
-
+ /* dad (bits 0-1) is 0 */
+ sata_cmd.retfis_ncqtag_atap_dir_m_dad =
+ cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) |
+ ((ATAP & 0x3f) << 10) | dir);
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
@@ -4676,7 +4673,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- &pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index acf6e3005b84..eb8fd37b2066 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -731,7 +731,7 @@ struct sata_start_req {
__le32 tag;
__le32 device_id;
__le32 data_len;
- __le32 ncqtag_atap_dir_m_dad;
+ __le32 retfis_ncqtag_atap_dir_m_dad;
struct host_to_dev_fis sata_fis;
u32 reserved1;
u32 reserved2; /* dword 11. rsvd for normal I/O. */
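Both pm8001_hwi.c and pm80xx_hwi.c now fold a RETFIS bit into the renamed request dword; the packing expressions above imply the layout sketched below. The macro names are purely descriptive and do not exist in the driver:

/* Illustrative field map of retfis_ncqtag_atap_dir_m(_dad); names invented. */
#define SKETCH_RETFIS(x)   (((x) & 0x01) << 24)	/* return FIS on success */
#define SKETCH_NCQ_TAG(x)  (((x) & 0xff) << 16)	/* NCQ tag */
#define SKETCH_ATAP(x)     (((x) & 0x3f) << 10)	/* ATA protocol type */
#define SKETCH_ENCRYPT     0x20			/* pm80xx encrypted path only */
/* bits 0-1 (dad) stay 0; 'dir' contributes the data-direction field unchanged */

retfis mirrors task->ata_task.return_fis_on_success, i.e. the host asks the controller to hand back the response FIS even on successful completion.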
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 9415a4819470..50dc30051f22 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3584,8 +3584,7 @@ static ssize_t pmcraid_show_adapter_id(
struct Scsi_Host *shost = class_to_shost(dev);
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)shost->hostdata;
- u32 adapter_id = (pinstance->pdev->bus->number << 8) |
- pinstance->pdev->devfn;
+ u32 adapter_id = pci_dev_id(pinstance->pdev);
u32 aen_group = pmcraid_event_family.id;
return snprintf(buf, PAGE_SIZE,
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 909c49541984..d592ee9170c1 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -45,6 +45,11 @@ typedef struct {
#include "ppa.h"
+static unsigned int mode = PPA_AUTODETECT;
+module_param(mode, uint, 0644);
+MODULE_PARM_DESC(mode, "Transfer mode (0 = Autodetect, 1 = SPP 4-bit, "
+ "2 = SPP 8-bit, 3 = EPP 8-bit, 4 = EPP 16-bit, 5 = EPP 32-bit");
+
static struct scsi_pointer *ppa_scsi_pointer(struct scsi_cmnd *cmd)
{
return scsi_cmd_priv(cmd);
@@ -157,7 +162,7 @@ static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host)
return 0;
}
-static int device_check(ppa_struct *dev);
+static int device_check(ppa_struct *dev, bool autodetect);
#if PPA_DEBUG > 0
#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
@@ -302,13 +307,10 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len)
case PPA_EPP_8:
epp_reset(ppb);
w_ctr(ppb, 0x4);
-#ifdef CONFIG_SCSI_IZIP_EPP16
- if (!(((long) buffer | len) & 0x01))
- outsw(ppb + 4, buffer, len >> 1);
-#else
- if (!(((long) buffer | len) & 0x03))
+ if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
outsl(ppb + 4, buffer, len >> 2);
-#endif
+ else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
+ outsw(ppb + 4, buffer, len >> 1);
else
outsb(ppb + 4, buffer, len);
w_ctr(ppb, 0xc);
@@ -355,13 +357,10 @@ static int ppa_in(ppa_struct *dev, char *buffer, int len)
case PPA_EPP_8:
epp_reset(ppb);
w_ctr(ppb, 0x24);
-#ifdef CONFIG_SCSI_IZIP_EPP16
- if (!(((long) buffer | len) & 0x01))
- insw(ppb + 4, buffer, len >> 1);
-#else
- if (!(((long) buffer | len) & 0x03))
+ if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
insl(ppb + 4, buffer, len >> 2);
-#endif
+ else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
+ insw(ppb + 4, buffer, len >> 1);
else
insb(ppb + 4, buffer, len);
w_ctr(ppb, 0x2c);
@@ -469,6 +468,27 @@ static int ppa_init(ppa_struct *dev)
{
int retv;
unsigned short ppb = dev->base;
+ bool autodetect = dev->mode == PPA_AUTODETECT;
+
+ if (autodetect) {
+ int modes = dev->dev->port->modes;
+ int ppb_hi = dev->dev->port->base_hi;
+
+ /* Mode detection works up the chain of speed
+ * This avoids a nasty if-then-else-if-... tree
+ */
+ dev->mode = PPA_NIBBLE;
+
+ if (modes & PARPORT_MODE_TRISTATE)
+ dev->mode = PPA_PS2;
+
+ if (modes & PARPORT_MODE_ECP) {
+ w_ecr(ppb_hi, 0x20);
+ dev->mode = PPA_PS2;
+ }
+ if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP))
+ w_ecr(ppb_hi, 0x80);
+ }
ppa_disconnect(dev);
ppa_connect(dev, CONNECT_NORMAL);
@@ -492,7 +512,7 @@ static int ppa_init(ppa_struct *dev)
if (retv)
return -EIO;
- return device_check(dev);
+ return device_check(dev, autodetect);
}
static inline int ppa_send_command(struct scsi_cmnd *cmd)
@@ -637,7 +657,7 @@ static void ppa_interrupt(struct work_struct *work)
case DID_OK:
break;
case DID_NO_CONNECT:
- printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", cmd->device->target);
+ printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", scmd_id(cmd));
break;
case DID_BUS_BUSY:
printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n");
@@ -883,7 +903,7 @@ static int ppa_reset(struct scsi_cmnd *cmd)
return SUCCESS;
}
-static int device_check(ppa_struct *dev)
+static int device_check(ppa_struct *dev, bool autodetect)
{
/* This routine looks for a device and then attempts to use EPP
to send a command. If all goes as planned then EPP is available. */
@@ -895,8 +915,8 @@ static int device_check(ppa_struct *dev)
old_mode = dev->mode;
for (loop = 0; loop < 8; loop++) {
/* Attempt to use EPP for Test Unit Ready */
- if ((ppb & 0x0007) == 0x0000)
- dev->mode = PPA_EPP_32;
+ if (autodetect && (ppb & 0x0007) == 0x0000)
+ dev->mode = PPA_EPP_8;
second_pass:
ppa_connect(dev, CONNECT_EPP_MAYBE);
@@ -924,7 +944,7 @@ second_pass:
udelay(1000);
ppa_disconnect(dev);
udelay(1000);
- if (dev->mode == PPA_EPP_32) {
+ if (dev->mode != old_mode) {
dev->mode = old_mode;
goto second_pass;
}
@@ -947,7 +967,7 @@ second_pass:
udelay(1000);
ppa_disconnect(dev);
udelay(1000);
- if (dev->mode == PPA_EPP_32) {
+ if (dev->mode != old_mode) {
dev->mode = old_mode;
goto second_pass;
}
@@ -1026,7 +1046,6 @@ static int __ppa_attach(struct parport *pb)
DEFINE_WAIT(wait);
ppa_struct *dev, *temp;
int ports;
- int modes, ppb, ppb_hi;
int err = -ENOMEM;
struct pardev_cb ppa_cb;
@@ -1034,7 +1053,7 @@ static int __ppa_attach(struct parport *pb)
if (!dev)
return -ENOMEM;
dev->base = -1;
- dev->mode = PPA_AUTODETECT;
+ dev->mode = mode < PPA_UNKNOWN ? mode : PPA_AUTODETECT;
dev->recon_tmo = PPA_RECON_TMO;
init_waitqueue_head(&waiting);
temp = find_parent();
@@ -1069,25 +1088,8 @@ static int __ppa_attach(struct parport *pb)
}
dev->waiting = NULL;
finish_wait(&waiting, &wait);
- ppb = dev->base = dev->dev->port->base;
- ppb_hi = dev->dev->port->base_hi;
- w_ctr(ppb, 0x0c);
- modes = dev->dev->port->modes;
-
- /* Mode detection works up the chain of speed
- * This avoids a nasty if-then-else-if-... tree
- */
- dev->mode = PPA_NIBBLE;
-
- if (modes & PARPORT_MODE_TRISTATE)
- dev->mode = PPA_PS2;
-
- if (modes & PARPORT_MODE_ECP) {
- w_ecr(ppb_hi, 0x20);
- dev->mode = PPA_PS2;
- }
- if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP))
- w_ecr(ppb_hi, 0x80);
+ dev->base = dev->dev->port->base;
+ w_ctr(dev->base, 0x0c);
/* Done configuration */
diff --git a/drivers/scsi/ppa.h b/drivers/scsi/ppa.h
index 6a1f8a2d70eb..098bcf7b9eb4 100644
--- a/drivers/scsi/ppa.h
+++ b/drivers/scsi/ppa.h
@@ -107,11 +107,7 @@ static char *PPA_MODE_STRING[] =
"PS/2",
"EPP 8 bit",
"EPP 16 bit",
-#ifdef CONFIG_SCSI_IZIP_EPP16
- "EPP 16 bit",
-#else
"EPP 32 bit",
-#endif
"Unknown"};
/* other options */
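With the compile-time CONFIG_SCSI_IZIP_EPP16 switch gone from ppa.c and ppa.h, the transfer mode is now chosen at module load time. An illustrative invocation, with the numeric values taken from the MODULE_PARM_DESC above:

/*
 * modprobe ppa mode=4	- force EPP 16-bit, what IZIP_EPP16 builds used to do
 * modprobe ppa mode=0	- keep the old autodetect behaviour (the default)
 *
 * Out-of-range values fall back to autodetect in __ppa_attach(), since
 * dev->mode is only taken from 'mode' when it is below PPA_UNKNOWN.
 */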
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index c5c0bbdafc4e..1619cc33034f 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -548,7 +548,6 @@ extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
void qedf_stag_change_work(struct work_struct *work);
void qedf_ctx_soft_reset(struct fc_lport *lport);
-extern void qedf_board_disable_work(struct work_struct *work);
extern void qedf_schedule_hw_err_handler(void *dev,
enum qed_hw_err_type err_type);
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index f4d81127239e..5ec2b817c694 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -59,6 +59,8 @@ extern uint qedf_debug;
#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */
#define QEDF_LOG_WARN 0x80000000 /* Warning logs */
+#define QEDF_DEBUGFS_LOG_LEN (2 * PAGE_SIZE)
+
/* Debug context structure */
struct qedf_dbg_ctx {
unsigned int host_no;
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index a3ed681c8ce3..451fd236bfd0 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -8,6 +8,7 @@
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include "qedf.h"
#include "qedf_dbg.h"
@@ -98,7 +99,9 @@ static ssize_t
qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
+ ssize_t ret;
size_t cnt = 0;
+ char *cbuf;
int id;
struct qedf_fastpath *fp = NULL;
struct qedf_dbg_ctx *qedf_dbg =
@@ -108,19 +111,25 @@ qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
- cnt = sprintf(buffer, "\nFastpath I/O completions\n\n");
+ cbuf = vmalloc(QEDF_DEBUGFS_LOG_LEN);
+ if (!cbuf)
+ return 0;
+
+ cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "\nFastpath I/O completions\n\n");
for (id = 0; id < qedf->num_queues; id++) {
fp = &(qedf->fp_array[id]);
if (fp->sb_id == QEDF_SB_ID_NULL)
continue;
- cnt += sprintf((buffer + cnt), "#%d: %lu\n", id,
- fp->completions);
+ cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt,
+ "#%d: %lu\n", id, fp->completions);
}
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
+
+ vfree(cbuf);
+
+ return ret;
}
static ssize_t
@@ -138,15 +147,14 @@ qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
int cnt;
+ char cbuf[32];
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
- cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
+ cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n", qedf_debug);
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
static ssize_t
@@ -185,18 +193,17 @@ qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
int cnt;
+ char cbuf[7];
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
struct qedf_ctx *qedf = container_of(qedf_dbg,
struct qedf_ctx, dbg_ctx);
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
- cnt = sprintf(buffer, "%s\n",
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n",
qedf->stop_io_on_error ? "true" : "false");
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
static ssize_t
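Each debugfs read handler converted above follows the same shape: format into a bounded kernel buffer with scnprintf() (or a vmalloc()ed one when the output can grow), then hand the copy-out and *ppos bookkeeping to simple_read_from_buffer() instead of writing into the user pointer directly. A self-contained sketch of that shape; foo_value and the handler name are invented for illustration:

static ssize_t foo_dbg_read(struct file *filp, char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char cbuf[32];
	int cnt;

	/* format into a kernel-side buffer of known size */
	cnt = scnprintf(cbuf, sizeof(cbuf), "foo = 0x%x\n", foo_value);

	/* copies at most 'count' bytes starting at *ppos and advances *ppos */
	return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}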
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 4750ec5789a8..10fe3383855c 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1904,6 +1904,7 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
goto drop_rdata_kref;
}
+ spin_lock_irqsave(&fcport->rport_lock, flags);
if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
@@ -1911,17 +1912,20 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
"io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
io_req->xid, io_req->sc_cmd);
rc = 1;
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
goto drop_rdata_kref;
}
+ /* Set the command type to abort */
+ io_req->cmd_type = QEDF_ABTS;
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
kref_get(&io_req->refcount);
xid = io_req->xid;
qedf->control_requests++;
qedf->packet_aborts++;
- /* Set the command type to abort */
- io_req->cmd_type = QEDF_ABTS;
io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
@@ -2210,7 +2214,9 @@ process_els:
refcount, fcport, fcport->rdata->ids.port_id);
/* Cleanup cmds re-use the same TID as the original I/O */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
io_req->cmd_type = QEDF_CLEANUP;
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
init_completion(&io_req->cleanup_done);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7825765c936c..91f3f1d7098e 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2805,6 +2805,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
struct qedf_ioreq *io_req;
struct qedf_rport *fcport;
u32 comp_type;
+ u8 io_comp_type;
+ unsigned long flags;
comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
FCOE_CQE_CQE_TYPE_MASK;
@@ -2838,11 +2840,14 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
return;
}
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ io_comp_type = io_req->cmd_type;
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
switch (comp_type) {
case FCOE_GOOD_COMPLETION_CQE_TYPE:
atomic_inc(&fcport->free_sqes);
- switch (io_req->cmd_type) {
+ switch (io_comp_type) {
case QEDF_SCSI_CMD:
qedf_scsi_completion(qedf, cqe, io_req);
break;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 0e316cc24b19..772218445a56 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -67,8 +67,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
-int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
-void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
void qedi_clearsq(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn,
struct iscsi_task *task);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b00222459607..44449c70a375 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -3093,8 +3093,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d7e8454304ce..691ef827a5ab 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,13 +12,12 @@
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0199 | |
* | Mailbox commands | 0x1206 | 0x11a5-0x11ff |
- * | Device Discovery | 0x2134 | 0x210e-0x2115 |
- * | | | 0x211c-0x2128 |
- * | | | 0x212c-0x2134 |
+ * | Device Discovery | 0x2134 | 0x2112-0x2115 |
+ * | | | 0x2127-0x2128 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
- * | | | 0x302d,0x3033 |
+ * | | | 0x302e,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 70482b55d240..54f0a412226f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -368,6 +368,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
#define ql_dbg_edif 0x00000400 /* edif and purex debug */
+#define ql_dbg_unsol 0x00000100 /* Unsolicited path debug */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 4ae38305c15a..deb642607deb 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -346,6 +346,12 @@ struct name_list_extended {
u8 sent;
};
+struct qla_nvme_fc_rjt {
+ struct fcnvme_ls_rjt *c;
+ dma_addr_t cdma;
+ u16 size;
+};
+
struct els_reject {
struct fc_els_ls_rjt *c;
dma_addr_t cdma;
@@ -466,6 +472,7 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
}
struct tmf_arg {
+ struct list_head tmf_elem;
struct qla_qpair *qpair;
struct fc_port *fcport;
struct scsi_qla_host *vha;
@@ -502,6 +509,20 @@ struct ct_arg {
port_id_t id;
};
+struct qla_nvme_lsrjt_pt_arg {
+ struct fc_port *fcport;
+ u8 opcode;
+ u8 vp_idx;
+ u8 reason;
+ u8 explanation;
+ __le16 nport_handle;
+ u16 control_flags;
+ __le16 ox_id;
+ __le32 xchg_address;
+ u32 tx_byte_count, rx_byte_count;
+ dma_addr_t tx_addr, rx_addr;
+};
+
/*
* SRB extensions.
*/
@@ -610,13 +631,16 @@ struct srb_iocb {
void *desc;
/* These are only used with ls4 requests */
- int cmd_len;
- int rsp_len;
+ __le32 cmd_len;
+ __le32 rsp_len;
dma_addr_t cmd_dma;
dma_addr_t rsp_dma;
enum nvmefc_fcp_datadir dir;
uint32_t dl;
uint32_t timeout_sec;
+ __le32 exchange_address;
+ __le16 nport_handle;
+ __le16 ox_id;
struct list_head entry;
} nvme;
struct {
@@ -706,6 +730,10 @@ typedef struct srb {
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
+ unsigned int abort:1;
+ unsigned int aborted:1;
+ unsigned int completed:1;
+ unsigned int unsol_rsp:1;
uint32_t handle;
uint16_t flags;
@@ -2541,7 +2569,7 @@ enum rscn_addr_format {
typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
- struct list_head tmf_pending;
+ struct list_head unsol_ctx_head;
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
@@ -2562,9 +2590,6 @@ typedef struct fc_port {
unsigned int do_prli_nvme:1;
uint8_t nvme_flag;
- uint8_t active_tmf;
-#define MAX_ACTIVE_TMF 8
-
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
port_id_t d_id;
@@ -3745,6 +3770,16 @@ struct qla_fw_resources {
u16 pad;
};
+struct qla_fw_res {
+ u16 iocb_total;
+ u16 iocb_limit;
+ atomic_t iocb_used;
+
+ u16 exch_total;
+ u16 exch_limit;
+ atomic_t exch_used;
+};
+
#define QLA_IOCB_PCT_LIMIT 95
struct qla_buf_pool {
@@ -3790,6 +3825,12 @@ struct qla_qpair {
uint16_t id; /* qp number used with FW */
uint16_t vp_idx; /* vport ID */
+
+ uint16_t dsd_inuse;
+ uint16_t dsd_avail;
+ struct list_head dsd_list;
+#define NUM_DSD_CHAIN 4096
+
mempool_t *srb_mempool;
struct pci_dev *pdev;
@@ -4387,7 +4428,6 @@ struct qla_hw_data {
uint8_t aen_mbx_count;
atomic_t num_pend_mbx_stage1;
atomic_t num_pend_mbx_stage2;
- atomic_t num_pend_mbx_stage3;
uint16_t frame_payload_size;
uint32_t login_retry_count;
@@ -4656,6 +4696,8 @@ struct qla_hw_data {
uint32_t flt_region_aux_img_status_sec;
};
uint8_t active_image;
+ uint8_t active_tmf;
+#define MAX_ACTIVE_TMF 8
/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -4670,6 +4712,8 @@ struct qla_hw_data {
struct qla_msix_entry *msix_entries;
+ struct list_head tmf_pending;
+ struct list_head tmf_active;
struct list_head vp_list; /* list of VP */
unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
sizeof(unsigned long)];
@@ -4713,11 +4757,6 @@ struct qla_hw_data {
struct fw_blob *hablob;
struct qla82xx_legacy_intr_set nx_legacy_intr;
- uint16_t gbl_dsd_inuse;
- uint16_t gbl_dsd_avail;
- struct list_head gbl_dsd_list;
-#define NUM_DSD_CHAIN 4096
-
uint8_t fw_type;
uint32_t file_prd_off; /* File firmware product offset */
@@ -4799,6 +4838,8 @@ struct qla_hw_data {
struct els_reject elsrej;
u8 edif_post_stop_cnt_down;
struct qla_vp_map *vp_map;
+ struct qla_nvme_fc_rjt lsrjt;
+ struct qla_fw_res fwres ____cacheline_aligned;
};
#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
@@ -4831,6 +4872,7 @@ struct active_regions {
* is variable) starting at "iocb".
*/
struct purex_item {
+ void *purls_context;
struct list_head list;
struct scsi_qla_host *vha;
void (*process_item)(struct scsi_qla_host *vha,
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 1925cc6897b6..a7a364760b80 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -116,7 +116,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
- if (!fp->dfs_rport_dir)
+ if (IS_ERR(fp->dfs_rport_dir))
return;
if (NVME_TARGET(vha->hw, fp))
debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
@@ -276,6 +276,16 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
exch_used, ha->base_qpair->fwres.exch_limit);
+
+ if (ql2xenforce_iocb_limit == 2) {
+ iocbs_used = atomic_read(&ha->fwres.iocb_used);
+ exch_used = atomic_read(&ha->fwres.exch_used);
+ seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n",
+ iocbs_used, ha->fwres.iocb_limit);
+
+ seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n",
+ exch_used, ha->fwres.exch_limit);
+ }
}
return 0;
@@ -698,14 +708,14 @@ create_nodes:
if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
ha->tgt.dfs_naqp = debugfs_create_file("naqp",
0400, ha->dfs_dir, vha, &dfs_naqp_ops);
- if (!ha->tgt.dfs_naqp) {
+ if (IS_ERR(ha->tgt.dfs_naqp)) {
ql_log(ql_log_warn, vha, 0xd011,
"Unable to create debugFS naqp node.\n");
goto out;
}
}
vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
- if (!vha->dfs_rport_root) {
+ if (IS_ERR(vha->dfs_rport_root)) {
ql_log(ql_log_warn, vha, 0xd012,
"Unable to create debugFS rports node.\n");
goto out;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ba7831f24734..09cb9413670a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -48,8 +48,6 @@ extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
struct els_plogi *els_plogi);
-extern void qla2x00_update_fcports(scsi_qla_host_t *);
-
extern int qla2x00_abort_isp(scsi_qla_host_t *);
extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_quiesce_io(scsi_qla_host_t *);
@@ -143,6 +141,7 @@ void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
void qla_edif_clear_appdata(struct scsi_qla_host *vha,
struct fc_port *fcport);
const char *sc_to_str(uint16_t cmd);
+void qla_adjust_iocb_limit(scsi_qla_host_t *vha);
/*
* Global Data in qla_os.c source file.
@@ -205,8 +204,6 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
- fc_port_t *, uint16_t *);
extern int qla2x00_set_exlogins_buffer(struct scsi_qla_host *);
extern void qla2x00_free_exlogin_buffer(struct qla_hw_data *);
extern int qla2x00_set_exchoffld_buffer(struct scsi_qla_host *);
@@ -216,7 +213,6 @@ extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
extern struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *,
struct qla_hw_data *);
-extern void qla2x00_free_host(struct scsi_qla_host *);
extern void qla2x00_relogin(struct scsi_qla_host *);
extern void qla2x00_do_work(struct scsi_qla_host *);
extern void qla2x00_free_fcports(struct scsi_qla_host *);
@@ -238,13 +234,10 @@ extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
-extern void qla_eeh_work(struct work_struct *);
extern void qla2x00_sp_compl(srb_t *sp, int);
extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
extern void qla24xx_sched_upd_fcport(fc_port_t *);
-void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
@@ -610,7 +603,11 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
-void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp);
+void qla24xx_queue_purex_item(scsi_qla_host_t *, struct purex_item *,
+ void (*process_item)(struct scsi_qla_host *,
+ struct purex_item *));
+void __qla_consume_iocb(struct scsi_qla_host *, void **, struct rsp_que **);
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -673,9 +670,11 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job);
-int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
- struct rsp_que **rsp, u8 *buf, u32 buf_len);
-
+int qla2x00_sys_ld_info(struct bsg_job *bsg_job);
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *, void **,
+ struct rsp_que **, u8 *, u32);
+struct purex_item *qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp, bool is_purls, bool byte_order);
int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in,
uint16_t *mbx_out);
@@ -728,7 +727,6 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
-void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
@@ -851,7 +849,6 @@ extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);
-extern irqreturn_t qla82xx_msi_handler(int, void *);
extern irqreturn_t qla82xx_msix_default(int, void *);
extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
extern void qla82xx_enable_intrs(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 367fba27fe69..a314cfc5b263 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -508,6 +508,7 @@ static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
struct fc_port *fcport = ea->fcport;
+ unsigned long flags;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
"%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
@@ -522,9 +523,15 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2066,
"%s %8phC: adisc fail: post delete\n",
__func__, ea->fcport->port_name);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
/* deleted = 0 & logout_on_delete = force fw cleanup */
- fcport->deleted = 0;
+ if (fcport->deleted == QLA_SESS_DELETED)
+ fcport->deleted = 0;
+
fcport->logout_on_delete = 1;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
qlt_schedule_sess_for_deletion(ea->fcport);
return;
}
@@ -1134,7 +1141,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
u16 *mb;
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
+ goto done;
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
@@ -1188,8 +1195,9 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ fcport->flags &= ~(FCF_ASYNC_SENT);
done:
- fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
+ fcport->flags &= ~(FCF_ASYNC_ACTIVE);
return rval;
}
@@ -1446,7 +1454,6 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
- ea->fcport->deleted = 0;
ea->fcport->logout_on_delete = 1;
if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
@@ -1996,12 +2003,11 @@ qla2x00_tmf_iocb_timeout(void *data)
int rc, h;
unsigned long flags;
- if (sp->type == SRB_MARKER) {
- complete(&tmf->u.tmf.comp);
- return;
- }
+ if (sp->type == SRB_MARKER)
+ rc = QLA_FUNCTION_FAILED;
+ else
+ rc = qla24xx_async_abort_cmd(sp, false);
- rc = qla24xx_async_abort_cmd(sp, false);
if (rc) {
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
@@ -2032,10 +2038,14 @@ static void qla_marker_sp_done(srb_t *sp, int res)
complete(&tmf->u.tmf.comp);
}
-#define START_SP_W_RETRIES(_sp, _rval) \
+#define START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
{\
int cnt = 5; \
do { \
+ if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
+ _rval = EINVAL; \
+ break; \
+ } \
_rval = qla2x00_start_sp(_sp); \
if (_rval == EAGAIN) \
msleep(1); \
@@ -2058,6 +2068,7 @@ qla26xx_marker(struct tmf_arg *arg)
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
fc_port_t *fcport = arg->fcport;
+ u32 chip_gen, login_gen;
if (TMF_NOT_READY(arg->fcport)) {
ql_dbg(ql_dbg_taskm, vha, 0x8039,
@@ -2067,6 +2078,9 @@ qla26xx_marker(struct tmf_arg *arg)
return QLA_SUSPENDED;
}
+ chip_gen = vha->hw->chip_reset;
+ login_gen = fcport->login_gen;
+
/* ref: INIT */
sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
@@ -2084,7 +2098,7 @@ qla26xx_marker(struct tmf_arg *arg)
tm_iocb->u.tmf.loop_id = fcport->loop_id;
tm_iocb->u.tmf.vp_index = vha->vp_idx;
- START_SP_W_RETRIES(sp, rval);
+ START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
ql_dbg(ql_dbg_taskm, vha, 0x8006,
"Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
@@ -2123,6 +2137,17 @@ static void qla2x00_tmf_sp_done(srb_t *sp, int res)
complete(&tmf->u.tmf.comp);
}
+static int qla_tmf_wait(struct tmf_arg *arg)
+{
+ /* there are only 2 types of error handling that reaches here, lun or target reset */
+ if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
+ return qla2x00_eh_wait_for_pending_commands(arg->vha,
+ arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
+ else
+ return qla2x00_eh_wait_for_pending_commands(arg->vha,
+ arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
+}
+
static int
__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
@@ -2130,8 +2155,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
-
fc_port_t *fcport = arg->fcport;
+ u32 chip_gen, login_gen;
+ u64 jif;
if (TMF_NOT_READY(arg->fcport)) {
ql_dbg(ql_dbg_taskm, vha, 0x8032,
@@ -2141,6 +2167,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
return QLA_SUSPENDED;
}
+ chip_gen = vha->hw->chip_reset;
+ login_gen = fcport->login_gen;
+
/* ref: INIT */
sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
@@ -2158,7 +2187,7 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
tm_iocb->u.tmf.flags = arg->flags;
tm_iocb->u.tmf.lun = arg->lun;
- START_SP_W_RETRIES(sp, rval);
+ START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
@@ -2176,8 +2205,26 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
"TM IOCB failed (%x).\n", rval);
}
- if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
- rval = qla26xx_marker(arg);
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+ jif = jiffies;
+ if (qla_tmf_wait(arg)) {
+ ql_log(ql_log_info, vha, 0x803e,
+ "Waited %u ms Nexus=%ld:%06x:%llu.\n",
+ jiffies_to_msecs(jiffies - jif), vha->host_no,
+ fcport->d_id.b24, arg->lun);
+ }
+
+ if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
+ rval = qla26xx_marker(arg);
+ } else {
+ ql_log(ql_log_info, vha, 0x803e,
+ "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
+ vha->host_no, fcport->d_id.b24, arg->lun);
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+ if (tm_iocb->u.tmf.data)
+ rval = tm_iocb->u.tmf.data;
done_free_sp:
/* ref: INIT */
@@ -2186,30 +2233,42 @@ done:
return rval;
}
-static void qla_put_tmf(fc_port_t *fcport)
+static void qla_put_tmf(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- fcport->active_tmf--;
+ ha->active_tmf--;
+ list_del(&arg->tmf_elem);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static
-int qla_get_tmf(fc_port_t *fcport)
+int qla_get_tmf(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ fc_port_t *fcport = arg->fcport;
int rc = 0;
- LIST_HEAD(tmf_elem);
+ struct tmf_arg *t;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_add_tail(&tmf_elem, &fcport->tmf_pending);
+ list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
+ if (t->fcport == arg->fcport && t->lun == arg->lun) {
+ /* reject duplicate TMF */
+ ql_log(ql_log_warn, vha, 0x802c,
+ "found duplicate TMF. Nexus=%ld:%06x:%llu.\n",
+ vha->host_no, fcport->d_id.b24, arg->lun);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return -EINVAL;
+ }
+ }
- while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
+ list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
+ while (ha->active_tmf >= MAX_ACTIVE_TMF) {
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
msleep(1);
@@ -2221,15 +2280,17 @@ int qla_get_tmf(fc_port_t *fcport)
rc = EIO;
break;
}
- if (fcport->active_tmf < MAX_ACTIVE_TMF &&
- list_is_first(&tmf_elem, &fcport->tmf_pending))
+ if (ha->active_tmf < MAX_ACTIVE_TMF &&
+ list_is_first(&arg->tmf_elem, &ha->tmf_pending))
break;
}
- list_del(&tmf_elem);
+ list_del(&arg->tmf_elem);
- if (!rc)
- fcport->active_tmf++;
+ if (!rc) {
+ ha->active_tmf++;
+ list_add_tail(&arg->tmf_elem, &ha->tmf_active);
+ }
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2241,9 +2302,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
uint32_t tag)
{
struct scsi_qla_host *vha = fcport->vha;
- struct qla_qpair *qpair;
struct tmf_arg a;
- int i, rval = QLA_SUCCESS;
+ int rval = QLA_SUCCESS;
if (TMF_NOT_READY(fcport))
return QLA_SUSPENDED;
@@ -2251,47 +2311,22 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
a.vha = fcport->vha;
a.fcport = fcport;
a.lun = lun;
+ a.flags = flags;
+ INIT_LIST_HEAD(&a.tmf_elem);
+
if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
a.modifier = MK_SYNC_ID_LUN;
-
- if (qla_get_tmf(fcport))
- return QLA_FUNCTION_FAILED;
} else {
a.modifier = MK_SYNC_ID;
}
- if (vha->hw->mqenable) {
- for (i = 0; i < vha->hw->num_qpairs; i++) {
- qpair = vha->hw->queue_pair_map[i];
- if (!qpair)
- continue;
-
- if (TMF_NOT_READY(fcport)) {
- ql_log(ql_log_warn, vha, 0x8026,
- "Unable to send TM due to disruption.\n");
- rval = QLA_SUSPENDED;
- break;
- }
-
- a.qpair = qpair;
- a.flags = flags|TCF_NOTMCMD_TO_TARGET;
- rval = __qla2x00_async_tm_cmd(&a);
- if (rval)
- break;
- }
- }
-
- if (rval)
- goto bailout;
+ if (qla_get_tmf(&a))
+ return QLA_FUNCTION_FAILED;
a.qpair = vha->hw->base_qpair;
- a.flags = flags;
rval = __qla2x00_async_tm_cmd(&a);
-bailout:
- if (a.modifier == MK_SYNC_ID_LUN)
- qla_put_tmf(fcport);
-
+ qla_put_tmf(&a);
return rval;
}
@@ -4147,39 +4182,61 @@ out:
return ha->flags.lr_detected;
}
-void qla_init_iocb_limit(scsi_qla_host_t *vha)
+static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
{
- u16 i, num_qps;
- u32 limit;
- struct qla_hw_data *ha = vha->hw;
+ u8 num_qps;
+ u16 limit;
+ struct qla_hw_data *ha = qpair->vha->hw;
num_qps = ha->num_qpairs + 1;
limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
- ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
- ha->base_qpair->fwres.iocbs_limit = limit;
- ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
- ha->base_qpair->fwres.iocbs_used = 0;
+ qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+ qpair->fwres.iocbs_limit = limit;
+ qpair->fwres.iocbs_qp_limit = limit / num_qps;
+
+ qpair->fwres.exch_total = ha->orig_fw_xcb_count;
+ qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
+ QLA_IOCB_PCT_LIMIT) / 100;
+}
- ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
- ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
- QLA_IOCB_PCT_LIMIT) / 100;
+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+ u8 i;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla_adjust_iocb_limit(ha->base_qpair);
+ ha->base_qpair->fwres.iocbs_used = 0;
ha->base_qpair->fwres.exch_used = 0;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i]) {
- ha->queue_pair_map[i]->fwres.iocbs_total =
- ha->orig_fw_iocb_count;
- ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
- ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
- limit / num_qps;
+ __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
ha->queue_pair_map[i]->fwres.iocbs_used = 0;
- ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
- ha->queue_pair_map[i]->fwres.exch_limit =
- (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
ha->queue_pair_map[i]->fwres.exch_used = 0;
}
}
+
+ ha->fwres.iocb_total = ha->orig_fw_iocb_count;
+ ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ ha->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+ atomic_set(&ha->fwres.iocb_used, 0);
+ atomic_set(&ha->fwres.exch_used, 0);
+}
+
+void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
+{
+ u8 i;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla_adjust_iocb_limit(ha->base_qpair);
+
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
+ }
}
/**
@@ -4777,15 +4834,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
if (ha->flags.edif_enabled)
mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
+ QLA_FW_STARTED(ha);
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
if (rval) {
+ QLA_FW_STOPPED(ha);
ql_log(ql_log_fatal, vha, 0x00d2,
"Init Firmware **** FAILED ****.\n");
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
- QLA_FW_STARTED(ha);
vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
}
@@ -5506,7 +5564,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
- INIT_LIST_HEAD(&fcport->tmf_pending);
+ INIT_LIST_HEAD(&fcport->unsol_ctx_head);
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
@@ -5549,7 +5607,7 @@ static void qla_get_login_template(scsi_qla_host_t *vha)
__be32 *q;
memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size);
+ sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
ha->init_cb, sz);
if (rval != QLA_SUCCESS) {
@@ -6090,6 +6148,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
+ unsigned long flags;
+
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
@@ -6099,7 +6159,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
fcport->deleted = 0;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
if (vha->hw->current_topology == ISP_CFG_NL)
fcport->logout_on_delete = 0;
else
@@ -7337,14 +7401,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
}
/* purge MBox commands */
- if (atomic_read(&ha->num_pend_mbx_stage3)) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
complete(&ha->mbx_intr_comp);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
i = 0;
- while (atomic_read(&ha->num_pend_mbx_stage3) ||
- atomic_read(&ha->num_pend_mbx_stage2) ||
+ while (atomic_read(&ha->num_pend_mbx_stage2) ||
atomic_read(&ha->num_pend_mbx_stage1)) {
msleep(20);
i++;
@@ -9590,6 +9655,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->vp_idx = vp_idx;
qpair->fw_started = ha->flags.fw_started;
INIT_LIST_HEAD(&qpair->hints_list);
+ INIT_LIST_HEAD(&qpair->dsd_list);
qpair->chip_reset = ha->base_qpair->chip_reset;
qpair->enable_class_2 = ha->base_qpair->enable_class_2;
qpair->enable_explicit_conf =
@@ -9718,6 +9784,19 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
if (ret != QLA_SUCCESS)
goto fail;
+ if (!list_empty(&qpair->dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &qpair->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ }
+
mutex_lock(&ha->mq_lock);
ha->queue_pair_map[qpair->id] = NULL;
clear_bit(qpair->id, ha->qpair_qid_map);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0167e85ba058..a4a56ab0ba74 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -386,6 +386,7 @@ enum {
RESOURCE_IOCB = BIT_0,
RESOURCE_EXCH = BIT_1, /* exchange */
RESOURCE_FORCE = BIT_2,
+ RESOURCE_HA = BIT_3,
};
static inline int
@@ -393,7 +394,7 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
u16 iocbs_used, i;
u16 exch_used;
- struct qla_hw_data *ha = qp->vha->hw;
+ struct qla_hw_data *ha = qp->hw;
if (!ql2xenforce_iocb_limit) {
iores->res_type = RESOURCE_NONE;
@@ -428,15 +429,69 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
return -ENOSPC;
}
}
+
+ if (ql2xenforce_iocb_limit == 2) {
+ if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
+ ha->fwres.iocb_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
+ ha->fwres.exch_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+ }
+
force:
qp->fwres.iocbs_used += iores->iocb_cnt;
qp->fwres.exch_used += iores->exch_cnt;
+ if (ql2xenforce_iocb_limit == 2) {
+ atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
+ atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
+ iores->res_type |= RESOURCE_HA;
+ }
return 0;
}
+/*
+ * qla_atomic_dtz() - decrement an atomic counter without going below zero.
+ * @v: pointer of type atomic_t
+ * @amount: amount to subtract from @v
+ */
+static void qla_atomic_dtz(atomic_t *v, int amount)
+{
+ int c, old, dec;
+
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - amount;
+ if (unlikely(dec < 0))
+ dec = 0;
+
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+}
+
static inline void
qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
+ struct qla_hw_data *ha = qp->hw;
+
+ if (iores->res_type & RESOURCE_HA) {
+ if (iores->res_type & RESOURCE_IOCB)
+ qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
+
+ if (iores->res_type & RESOURCE_EXCH)
+ qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
+ }
+
if (iores->res_type & RESOURCE_IOCB) {
if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
qp->fwres.iocbs_used -= iores->iocb_cnt;
@@ -522,7 +577,7 @@ fcport_is_bigger(fc_port_t *fcport)
static inline struct qla_qpair *
qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
{
- int cpuid = smp_processor_id();
+ int cpuid = raw_smp_processor_id();
if (qpair->cpuid != cpuid &&
ha->qp_cpu_map[cpuid]) {
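The qla_atomic_dtz() helper introduced above releases the adapter-wide iocb_used/exch_used counters without ever letting them underflow. Below is a minimal user-space sketch of the same compare-and-swap clamp, using C11 atomics in place of the kernel's atomic_t; the names and values are illustrative only, not part of the driver.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Decrement *v by amount, clamping the result at zero. This is only a
 * sketch of the pattern used by qla_atomic_dtz(), not the kernel code.
 */
static void atomic_dec_to_zero(atomic_int *v, int amount)
{
	int cur = atomic_load(v);

	for (;;) {
		int next = cur - amount;

		if (next < 0)
			next = 0;
		/* On failure, cur is reloaded with the current value. */
		if (atomic_compare_exchange_weak(v, &cur, next))
			break;
	}
}

int main(void)
{
	atomic_int used = 5;

	atomic_dec_to_zero(&used, 3);	/* 5 -> 2 */
	atomic_dec_to_zero(&used, 7);	/* clamps at 0 instead of going to -5 */
	printf("used = %d\n", atomic_load(&used));
	return 0;
}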
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 730d8609276c..df90169f8244 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,6 +11,7 @@
#include <scsi/scsi_tcq.h>
+static int qla_start_scsi_type6(srb_t *sp);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
* @sp: SCSI command
@@ -590,8 +591,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint16_t tot_dsds)
{
struct dsd64 *cur_dsd = NULL, *next_dsd;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
struct scsi_cmnd *cmd;
struct scatterlist *cur_seg;
uint8_t avail_dsds;
@@ -613,9 +612,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
return 0;
}
- vha = sp->vha;
- ha = vha->hw;
-
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
@@ -636,14 +632,13 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
tot_dsds -= avail_dsds;
dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
- dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
- struct dsd_dma, list);
+ dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list);
next_dsd = dsd_ptr->dsd_addr;
list_del(&dsd_ptr->list);
- ha->gbl_dsd_avail--;
+ qpair->dsd_avail--;
list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
ctx->dsd_use_cnt++;
- ha->gbl_dsd_inuse++;
+ qpair->dsd_inuse++;
if (first_iocb) {
first_iocb = 0;
@@ -1722,6 +1717,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
if (cmd->cmd_len <= 16)
return qla24xx_start_scsi(sp);
+ else
+ return qla_start_scsi_type6(sp);
}
/* Setup device pointers. */
@@ -2101,6 +2098,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
if (cmd->cmd_len <= 16)
return qla2xxx_start_scsi_mq(sp);
+ else
+ return qla_start_scsi_type6(sp);
}
spin_lock_irqsave(&qpair->qp_lock, flags);
@@ -3368,6 +3367,7 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ struct qla_qpair *qpair = sp->qpair;
/* Setup device pointers. */
reg = &ha->iobase->isp82;
@@ -3416,18 +3416,18 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t i;
more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
ql_dbg(ql_dbg_io, vha, 0x300d,
"Num of DSD list %d is than %d for cmd=%p.\n",
- more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN,
cmd);
goto queuing_error;
}
- if (more_dsd_lists <= ha->gbl_dsd_avail)
+ if (more_dsd_lists <= qpair->dsd_avail)
goto sufficient_dsds;
else
- more_dsd_lists -= ha->gbl_dsd_avail;
+ more_dsd_lists -= qpair->dsd_avail;
for (i = 0; i < more_dsd_lists; i++) {
dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
@@ -3447,8 +3447,8 @@ qla82xx_start_scsi(srb_t *sp)
"for cmd=%p.\n", cmd);
goto queuing_error;
}
- list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
- ha->gbl_dsd_avail++;
+ list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+ qpair->dsd_avail++;
}
sufficient_dsds:
@@ -3767,21 +3767,28 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
- cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
-
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ if (sp->unsol_rsp) {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT);
+ cmd_pkt->nport_handle = nvme->u.nvme.nport_handle;
+ cmd_pkt->exchange_address = nvme->u.nvme.exchange_address;
+ } else {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
+ cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+ put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
+ }
+
cmd_pkt->tx_dseg_count = cpu_to_le16(1);
- cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
- cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
+ cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
+ cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
-
- cmd_pkt->rx_dseg_count = cpu_to_le16(1);
- cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
- cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
- put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
}
static void
@@ -3882,6 +3889,7 @@ qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
{
mrk->entry_type = MARKER_TYPE;
mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
+ mrk->handle = make_handle(sp->qpair->req->id, sp->handle);
if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
@@ -4197,3 +4205,267 @@ queuing_error:
return rval;
}
+
+/**
+ * qla_start_scsi_type6() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla_start_scsi_type6(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t handle;
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+ __be32 *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Setup qpair pointers */
+ req = qpair->req;
+ rsp = qpair->rsp;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else {
+ nseg = 0;
+ }
+
+ tot_dsds = nseg;
+
+ /* even though the driver only needs 1 T6 IOCB, the FW still converts DSDs to continuation IOCBs */
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
+ sp->iores.iocb_cnt = req_cnt;
+
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ goto queuing_error;
+
+ more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x3028,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd);
+ goto queuing_error;
+ }
+
+ if (more_dsd_lists <= qpair->dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= qpair->dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x3029,
+ "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ INIT_LIST_HEAD(&dsd_ptr->list);
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x302a,
+ "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+ qpair->dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ if (req->cnt < (req_cnt + 2)) {
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length - (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ ctx = &sp->u.scmd.ct6_ctx;
+
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3031,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if (cmd->cmd_len % 4 ||
+ cmd->cmd_len > QLA_CDB_BUF_SIZE) {
+ /*
+ * CDBs longer than 16 bytes must be a multiple of
+ * 4 bytes and must not exceed the CDB buffer size.
+ */
+ ql_log(ql_log_warn, vha, 0x3033,
+ "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+ cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = make_handle(req->id, handle);
+
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number */
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds);
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ put_unaligned_le64(ctx->fcp_cmnd_dma,
+ &cmd_pkt->fcp_cmnd_dseg_address);
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+ wmb();
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ sp->qpair->cmd_cnt++;
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ qla_put_fw_resources(sp->qpair, &sp->iores);
+
+ if (sp->u.scmd.crc_ctx) {
+ mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+ sp->u.scmd.crc_ctx = NULL;
+ }
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
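For reference, here is a small stand-alone sketch of the fcp_cmnd_len arithmetic used by qla_start_scsi_type6() above: 12 bytes of fixed FCP_CMND header, the CDB (16 bytes, or the full length for larger commands), and a trailing 4-byte FCP_DL. It mirrors the checks in the patch but is only an illustration, not driver code.

#include <stdio.h>

#define QLA_CDB_BUF_SIZE 256	/* matches the qla_nx.h definition in this patch */

/*
 * Sketch of the FCP_CMND length math: fixed header, CDB, then FCP_DL.
 * Returns 0 for CDB lengths the type-6 path rejects.
 */
static int fcp_cmnd_len(int cmd_len)
{
	if (cmd_len > 16) {
		if (cmd_len % 4 || cmd_len > QLA_CDB_BUF_SIZE)
			return 0;	/* rejected, as in the driver */
		return 12 + cmd_len + 4;
	}
	return 12 + 16 + 4;
}

int main(void)
{
	printf("10-byte CDB -> %d\n", fcp_cmnd_len(10));	/* 32 */
	printf("32-byte CDB -> %d\n", fcp_cmnd_len(32));	/* 48 */
	printf("18-byte CDB -> %d (rejected)\n", fcp_cmnd_len(18));
	return 0;
}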
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 656700f79325..d48007e18288 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -56,6 +56,22 @@ const char *const port_state_str[] = {
[FCS_ONLINE] = "ONLINE"
};
+#define SFP_DISABLE_LASER_INITIATED 0x15 /* Sub code of 8070 AEN */
+#define SFP_ENABLE_LASER_INITIATED 0x16 /* Sub code of 8070 AEN */
+
+static inline void display_Laser_info(scsi_qla_host_t *vha,
+ u16 mb1, u16 mb2, u16 mb3)
+{
+ if (mb1 == SFP_DISABLE_LASER_INITIATED)
+ ql_log(ql_log_warn, vha, 0xf0a2,
+ "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n",
+ mb3, mb2);
+ if (mb1 == SFP_ENABLE_LASER_INITIATED)
+ ql_log(ql_log_warn, vha, 0xf0a3,
+ "SFP temperature (%d C) reached normal operating level. Laser is enabled.\n",
+ mb3);
+}
+
static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
@@ -823,6 +839,135 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
}
}
+/**
+ * qla27xx_copy_multiple_pkt() - Copy over purex/purls packets that can
+ * span over multiple IOCBs.
+ * @vha: SCSI driver HA context
+ * @pkt: ELS packet
+ * @rsp: Response queue
+ * @is_purls: true for an Unsolicited Received FC-NVMe LS rsp IOCB,
+ * false for an Unsolicited Received ELS IOCB
+ * @byte_order: true to byte-swap the IOCB payload
+ */
+struct purex_item *
+qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt,
+ struct rsp_que **rsp, bool is_purls,
+ bool byte_order)
+{
+ struct purex_entry_24xx *purex = NULL;
+ struct pt_ls4_rx_unsol *purls = NULL;
+ struct rsp_que *rsp_q = *rsp;
+ sts_cont_entry_t *new_pkt;
+ uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+ uint16_t buffer_copy_offset = 0, payload_size = 0;
+ uint16_t entry_count, entry_count_remaining;
+ struct purex_item *item;
+ void *iocb_pkt = NULL;
+
+ if (is_purls) {
+ purls = *pkt;
+ total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) -
+ PURX_ELS_HEADER_SIZE;
+ entry_count = entry_count_remaining = purls->entry_count;
+ payload_size = sizeof(purls->payload);
+ } else {
+ purex = *pkt;
+ total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) -
+ PURX_ELS_HEADER_SIZE;
+ entry_count = entry_count_remaining = purex->entry_count;
+ payload_size = sizeof(purex->els_frame_payload);
+ }
+
+ pending_bytes = total_bytes;
+ no_bytes = (pending_bytes > payload_size) ? payload_size :
+ pending_bytes;
+ ql_dbg(ql_dbg_async, vha, 0x509a,
+ "%s LS, frame_size 0x%x, entry count %d\n",
+ (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count);
+
+ item = qla24xx_alloc_purex_item(vha, total_bytes);
+ if (!item)
+ return item;
+
+ iocb_pkt = &item->iocb;
+
+ if (is_purls)
+ memcpy(iocb_pkt, &purls->payload[0], no_bytes);
+ else
+ memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+
+ if (is_purls)
+ ((response_t *)purls)->signature = RESPONSE_PROCESSED;
+ else
+ ((response_t *)purex)->signature = RESPONSE_PROCESSED;
+ wmb();
+
+ do {
+ while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+ if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
+ ql_dbg(ql_dbg_async, vha, 0x5084,
+ "Ran out of IOCBs, partial data 0x%x\n",
+ buffer_copy_offset);
+ cpu_relax();
+ continue;
+ }
+
+ new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+ ql_log(ql_log_warn, vha, 0x507a,
+ "Unexpected IOCB type, partial data 0x%x\n",
+ buffer_copy_offset);
+ break;
+ }
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+ no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+ sizeof(new_pkt->data) : pending_bytes;
+ if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+ memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+ new_pkt->data, no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+ } else {
+ ql_log(ql_log_warn, vha, 0x5044,
+ "Attempt to copy more that we got, optimizing..%x\n",
+ buffer_copy_offset);
+ memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+ new_pkt->data,
+ total_bytes - buffer_copy_offset);
+ }
+
+ ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+ wmb();
+ }
+
+ if (pending_bytes != 0 || entry_count_remaining != 0) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
+ total_bytes, entry_count_remaining);
+ qla24xx_free_purex_item(item);
+ return NULL;
+ }
+ } while (entry_count_remaining > 0);
+
+ if (byte_order)
+ host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
+
+ return item;
+}
+
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
@@ -958,7 +1103,7 @@ initialize_purex_header:
return item;
}
-static void
+void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
void (*process_item)(struct scsi_qla_host *vha,
struct purex_item *pkt))
@@ -1121,8 +1266,12 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
unsigned long flags;
fc_port_t *fcport = NULL;
- if (!vha->hw->flags.fw_started)
+ if (!vha->hw->flags.fw_started) {
+ ql_log(ql_log_warn, vha, 0x50ff,
+ "Dropping AEN - %04x %04x %04x %04x.\n",
+ mb[0], mb[1], mb[2], mb[3]);
return;
+ }
/* Setup to process RIO completion. */
handle_cnt = 0;
@@ -1794,6 +1943,8 @@ global_port_update:
break;
case MBA_TEMPERATURE_ALERT:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ display_Laser_info(vha, mb[1], mb[2], mb[3]);
ql_dbg(ql_dbg_async, vha, 0x505e,
"TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
break;
@@ -2539,7 +2690,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
case CS_PORT_BUSY:
case CS_INCOMPLETE:
case CS_PORT_UNAVAILABLE:
- case CS_TIMEOUT:
case CS_RESET:
if (atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
@@ -3808,13 +3958,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
+ struct pt_ls4_rx_unsol *p;
u16 rsp_in = 0, cur_ring_index;
int is_shadow_hba;
if (!ha->flags.fw_started)
return;
- if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+ if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
rsp->qpair->rcv_intr = 1;
if (!rsp->qpair->cpu_mapped)
@@ -3980,7 +4131,19 @@ process_err:
qla28xx_sa_update_iocb_entry(vha, rsp->req,
(struct sa_update_28xx *)pkt);
break;
-
+ case PT_LS4_UNSOL:
+ p = (void *)pkt;
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+ rsp->ring_ptr = (response_t *)pkt;
+ rsp->ring_index = cur_ring_index;
+
+ ql_dbg(ql_dbg_init, vha, 0x2124,
+ "Defer processing UNSOL LS req opcode %#x...\n",
+ p->payload[0]);
+ return;
+ }
+ qla2xxx_process_purls_iocb((void **)&pkt, &rsp);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -4305,7 +4468,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
}
ha = qpair->hw;
- queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+ queue_work(ha->wq, &qpair->q_work);
return IRQ_HANDLED;
}
@@ -4331,7 +4494,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+ queue_work(ha->wq, &qpair->q_work);
return IRQ_HANDLED;
}
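qla27xx_copy_multiple_pkt() above gathers a frame whose payload is split across a first IOCB and fixed-size continuation entries. The following stand-alone sketch shows only that reassembly bookkeeping; the buffer sizes and names are made up for illustration, and the real code additionally walks the response ring and handles entry signatures.

#include <stdio.h>
#include <string.h>

#define FIRST_PKT_PAYLOAD 32	/* illustrative sizes, not the IOCB layout */
#define CONT_PKT_PAYLOAD  44

/* Copy min(pending, chunk) bytes from one entry into the output buffer
 * and return the new offset. */
static size_t copy_chunk(char *dst, size_t off, size_t total,
			 const char *src, size_t chunk)
{
	size_t pending = total - off;
	size_t n = pending > chunk ? chunk : pending;

	memcpy(dst + off, src, n);
	return off + n;
}

int main(void)
{
	char frame[100], out[100];
	size_t off = 0, total = sizeof(frame);

	memset(frame, 'A', sizeof(frame));

	/* the first IOCB carries a small payload, the rest arrives in
	 * continuation entries */
	off = copy_chunk(out, off, total, frame, FIRST_PKT_PAYLOAD);
	while (off < total)
		off = copy_chunk(out, off, total, frame + off, CONT_PKT_PAYLOAD);

	printf("reassembled %zu of %zu bytes\n", off, total);
	return 0;
}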
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 254fd4c64262..21ec32b4fb28 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -273,7 +273,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
- atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -290,7 +289,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
@@ -302,11 +300,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
- atomic_dec(&ha->num_pend_mbx_stage3);
if (time_after(jiffies, wait_time + 5 * HZ))
ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
@@ -2213,6 +2209,9 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
"Entered %s.\n", __func__);
+ if (!ha->flags.fw_started)
+ return QLA_FUNCTION_FAILED;
+
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
if (IS_FWI2_CAPABLE(vha->hw))
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 86e85f2f4782..a8ddf356e662 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -12,6 +12,26 @@
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
+static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
+ struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a,
+ bool is_xchg_terminate);
+
+struct qla_nvme_unsol_ctx {
+ struct list_head elem;
+ struct scsi_qla_host *vha;
+ struct fc_port *fcport;
+ struct srb *sp;
+ struct nvmefc_ls_rsp lsrsp;
+ struct nvmefc_ls_rsp *fd_rsp;
+ struct work_struct lsrsp_work;
+ struct work_struct abort_work;
+ __le32 exchange_address;
+ __le16 nport_handle;
+ __le16 ox_id;
+ int comp_status;
+ spinlock_t cmd_lock;
+};
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
@@ -132,6 +152,7 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
"Failed to allocate qpair\n");
return -EINVAL;
}
+ qla_adjust_iocb_limit(vha);
}
*handle = qpair;
@@ -215,6 +236,55 @@ static void qla_nvme_sp_ls_done(srb_t *sp, int res)
schedule_work(&priv->ls_work);
}
+static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
+ struct nvmefc_ls_rsp *fd_rsp;
+ unsigned long flags;
+
+ if (!uctx) {
+ qla2x00_rel_sp(sp);
+ return;
+ }
+
+ spin_lock_irqsave(&uctx->cmd_lock, flags);
+ uctx->sp = NULL;
+ sp->priv = NULL;
+ spin_unlock_irqrestore(&uctx->cmd_lock, flags);
+
+ fd_rsp = uctx->fd_rsp;
+
+ list_del(&uctx->elem);
+
+ fd_rsp->done(fd_rsp);
+ kfree(uctx);
+ qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_lsrsp_complete(struct work_struct *work)
+{
+ struct qla_nvme_unsol_ctx *uctx =
+ container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);
+
+ kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
+}
+
+static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
+{
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
+
+ if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
+ return;
+
+ if (res)
+ res = -EINVAL;
+
+ uctx->comp_status = res;
+ INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
+ schedule_work(&uctx->lsrsp_work);
+}
+
/* it assumed that QPair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
@@ -287,6 +357,92 @@ out:
kref_put(&sp->cmd_kref, sp->put_fn);
}
+static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport,
+ struct nvmefc_ls_rsp *fd_resp)
+{
+ struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
+ struct qla_nvme_unsol_ctx, lsrsp);
+ struct qla_nvme_rport *qla_rport = rport->private;
+ fc_port_t *fcport = qla_rport->fcport;
+ struct scsi_qla_host *vha = uctx->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct srb_iocb *nvme;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+ uint8_t cnt = 0;
+
+ if (!fcport || fcport->deleted)
+ goto out;
+
+ if (!ha->flags.fw_started)
+ goto out;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto out;
+
+ sp->type = SRB_NVME_LS;
+ sp->name = "nvme_ls";
+ sp->done = qla_nvme_sp_lsrsp_done;
+ sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
+ sp->priv = (void *)uctx;
+ sp->unsol_rsp = 1;
+ uctx->sp = sp;
+ spin_lock_init(&uctx->cmd_lock);
+ nvme = &sp->u.iocb_cmd;
+ uctx->fd_rsp = fd_resp;
+ nvme->u.nvme.desc = fd_resp;
+ nvme->u.nvme.dir = 0;
+ nvme->u.nvme.dl = 0;
+ nvme->u.nvme.timeout_sec = 0;
+ nvme->u.nvme.cmd_dma = fd_resp->rspdma;
+ nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
+ nvme->u.nvme.rsp_len = 0;
+ nvme->u.nvme.rsp_dma = 0;
+ nvme->u.nvme.exchange_address = uctx->exchange_address;
+ nvme->u.nvme.nport_handle = uctx->nport_handle;
+ nvme->u.nvme.ox_id = uctx->ox_id;
+ dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ fd_resp->rsplen, DMA_TO_DEVICE);
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2122,
+ "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
+ fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
+ uctx->ox_id, uctx->nport_handle);
+retry:
+ rval = qla2x00_start_sp(sp);
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case EAGAIN:
+ msleep(PURLS_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < PURLS_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
+ ql_dbg(ql_log_warn, vha, 0x2123,
+ "Failed to xmit Unsol ls response = %d\n", rval);
+ rval = -EIO;
+ qla2x00_rel_sp(sp);
+ goto out;
+ }
+
+ return 0;
+out:
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
+ kfree(uctx);
+ return rval;
+}
+
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
@@ -348,8 +504,8 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
nvme->u.nvme.desc = fd;
nvme->u.nvme.dir = 0;
nvme->u.nvme.dl = 0;
- nvme->u.nvme.cmd_len = fd->rqstlen;
- nvme->u.nvme.rsp_len = fd->rsplen;
+ nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
+ nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
nvme->u.nvme.rsp_dma = fd->rspdma;
nvme->u.nvme.timeout_sec = fd->timeout;
nvme->u.nvme.cmd_dma = fd->rqstdma;
@@ -667,7 +823,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
rval = qla2x00_start_nvme_mq(sp);
if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x212d,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
sp->priv = NULL;
priv->sp = NULL;
@@ -719,6 +875,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
+ .xmt_ls_rsp = qla_nvme_xmt_ls_rsp,
.map_queues = qla_nvme_map_queues,
.max_hw_queues = DEF_NVME_HW_QUEUES,
.max_sgl_segments = 1024,
@@ -923,3 +1080,247 @@ inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
return;
kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}
+
+static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
+ u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ rjt->w0.ls_cmd = FCNVME_LSDESC_RQST;
+ rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
+ rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ rjt->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ rjt->rqst.w0.ls_cmd = ls_cmd;
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+}
+
+static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
+ struct pt_ls4_request *lsrjt_iocb,
+ struct qla_nvme_lsrjt_pt_arg *a)
+{
+ lsrjt_iocb->entry_type = PT_LS4_REQUEST;
+ lsrjt_iocb->entry_count = 1;
+ lsrjt_iocb->sys_define = 0;
+ lsrjt_iocb->entry_status = 0;
+ lsrjt_iocb->handle = QLA_SKIP_HANDLE;
+ lsrjt_iocb->nport_handle = a->nport_handle;
+ lsrjt_iocb->exchange_address = a->xchg_address;
+ lsrjt_iocb->vp_index = a->vp_idx;
+
+ lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);
+
+ put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
+ lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
+ lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
+ lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
+
+ put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
+ lsrjt_iocb->dsd[1].length = 0;
+ lsrjt_iocb->rx_dseg_count = 0;
+ lsrjt_iocb->rx_byte_count = 0;
+}
+
+static int
+qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
+{
+ struct pt_ls4_request *lsrjt_iocb;
+
+ lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
+ if (!lsrjt_iocb) {
+ ql_log(ql_log_warn, vha, 0x210e,
+ "qla2x00_alloc_iocbs failed.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (!is_xchg_terminate) {
+ qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
+ a->reason, a->explanation, 0);
+
+ a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
+ a->tx_addr = vha->hw->lsrjt.cdma;
+ a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x211f,
+ "Sending nvme fc ls reject ox_id %04x op %04x\n",
+ a->ox_id, a->opcode);
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
+ vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
+ } else {
+ a->tx_byte_count = 0;
+ a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
+ ql_dbg(ql_dbg_unsol, vha, 0x2110,
+ "Terminate nvme ls xchg 0x%x\n", a->xchg_address);
+ }
+
+ qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
+ /* flush iocb to mem before notifying hw doorbell */
+ wmb();
+ qla2x00_start_iocbs(vha, qp->req);
+ return 0;
+}
+
+/*
+ * qla2xxx_process_purls_pkt() - Pass-up Unsolicited
+ * Received FC-NVMe Link Service pkt to nvme_fc_rcv_ls_req().
+ * The LLDD needs to provide memory for the response buffer, which
+ * is used to reference the exchange corresponding to the LS when
+ * issuing an LS response. The LLDD must free the response buffer
+ * in lport->ops->xmt_ls_rsp().
+ *
+ * @vha: SCSI qla host
+ * @item: ptr to purex_item
+ */
+static void
+qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
+{
+ struct qla_nvme_unsol_ctx *uctx = item->purls_context;
+ struct qla_nvme_lsrjt_pt_arg a;
+ int ret = 1;
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp,
+ &item->iocb, item->size);
+#endif
+ if (ret) {
+ ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
+ list_del(&uctx->elem);
+ kfree(uctx);
+ }
+}
+
+static scsi_qla_host_t *
+qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
+{
+ scsi_qla_host_t *base_vha, *vha, *tvp;
+ unsigned long flags;
+
+ base_vha = pci_get_drvdata(ha->pdev);
+
+ if (!vp_index && !ha->num_vhosts)
+ return base_vha;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
+ if (vha->vp_idx == vp_index) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ return vha;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return NULL;
+}
+
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
+{
+ struct nvme_fc_remote_port *rport;
+ struct qla_nvme_rport *qla_rport;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct pt_ls4_rx_unsol *p = *pkt;
+ struct qla_nvme_unsol_ctx *uctx;
+ struct rsp_que *rsp_q = *rsp;
+ struct qla_hw_data *ha;
+ scsi_qla_host_t *vha;
+ fc_port_t *fcport = NULL;
+ struct purex_item *item;
+ port_id_t d_id = {0};
+ port_id_t id = {0};
+ u8 *opcode;
+ bool xmt_reject = false;
+
+ ha = rsp_q->hw;
+
+ vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
+ if (!vha) {
+ ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ memset((void *)&a, 0, sizeof(a));
+ opcode = (u8 *)&p->payload[0];
+ a.opcode = opcode[3];
+ a.vp_idx = p->vp_index;
+ a.nport_handle = p->nport_handle;
+ a.ox_id = p->ox_id;
+ a.xchg_address = p->exchange_address;
+
+ id.b.domain = p->s_id.domain;
+ id.b.area = p->s_id.area;
+ id.b.al_pa = p->s_id.al_pa;
+ d_id.b.domain = p->d_id[2];
+ d_id.b.area = p->d_id[1];
+ d_id.b.al_pa = p->d_id[0];
+
+ fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
+ if (!fcport) {
+ ql_dbg(ql_dbg_unsol, vha, 0x211e,
+ "Failed to find sid=%06x did=%06x\n",
+ id.b24, d_id.b24);
+ a.reason = FCNVME_RJT_RC_INV_ASSOC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+ rport = fcport->nvme_remote_port;
+ qla_rport = rport->private;
+
+ item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
+ if (!item) {
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+
+ uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
+ if (!uctx) {
+ ql_log(ql_log_info, vha, 0x2126, "Failed to allocate memory\n");
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ kfree(item);
+ goto out;
+ }
+
+ uctx->vha = vha;
+ uctx->fcport = fcport;
+ uctx->exchange_address = p->exchange_address;
+ uctx->nport_handle = p->nport_handle;
+ uctx->ox_id = p->ox_id;
+ qla_rport->uctx = uctx;
+ INIT_LIST_HEAD(&uctx->elem);
+ list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
+ item->purls_context = (void *)uctx;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2121,
+ "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
+ item->iocb.iocb[3], item->size, uctx->exchange_address,
+ fcport->d_id.b24);
+ /* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * ----- -----------------------------------------------
+ * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
+ * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
+ * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ */
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
+ &item->iocb, item->size);
+
+ qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
+out:
+ if (xmt_reject) {
+ qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
+ __qla_consume_iocb(vha, pkt, rsp);
+ }
+}
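qla_nvme_xmt_ls_rsp() above retries a busy submission a bounded number of times with a short sleep in between (PURLS_MSLEEP_INTERVAL/PURLS_RETRY_COUNT). A minimal user-space sketch of that retry shape follows, with a hypothetical submit() standing in for qla2x00_start_sp(); it is an illustration of the pattern, not the driver path.

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

#define RETRY_INTERVAL_MS 1	/* mirrors PURLS_MSLEEP_INTERVAL */
#define RETRY_COUNT       5	/* mirrors PURLS_RETRY_COUNT */

/* Hypothetical submit function: reports busy a few times, then queues. */
static int submit(int *busy_left)
{
	return (*busy_left)-- > 0 ? -1 /* busy */ : 0 /* queued */;
}

int main(void)
{
	struct timespec ts = { .tv_nsec = RETRY_INTERVAL_MS * 1000000L };
	int busy_for = 3, rc, tries = 0;

	do {
		rc = submit(&busy_for);
		if (rc == 0)
			break;
		nanosleep(&ts, NULL);	/* short pause before retrying */
	} while (++tries < RETRY_COUNT);

	printf("%s after %d retries\n", rc ? "gave up" : "queued", tries);
	return 0;
}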
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index d299478371b2..a253ac55171b 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -21,6 +21,7 @@
#define Q2T_NVME_NUM_TAGS 2048
#define QLA_MAX_FC_SEGMENTS 64
+struct qla_nvme_unsol_ctx;
struct scsi_qla_host;
struct qla_hw_data;
struct req_que;
@@ -37,6 +38,7 @@ struct nvme_private {
struct qla_nvme_rport {
struct fc_port *fcport;
+ struct qla_nvme_unsol_ctx *uctx;
};
#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */
@@ -75,6 +77,9 @@ struct cmd_nvme {
struct dsd64 nvme_dsd;
};
+#define PURLS_MSLEEP_INTERVAL 1
+#define PURLS_RETRY_COUNT 5
+
#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
struct pt_ls4_request {
uint8_t entry_type;
@@ -118,21 +123,19 @@ struct pt_ls4_rx_unsol {
__le32 exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
- be_id_t s_id;
+ le_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
__le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- __le16 rx_id;
- __le16 ox_id;
- __le32 param;
- __le32 desc0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
- __le32 desc_len;
- __le32 payload[3];
+ __le32 payload[5];
};
/*
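The pt_ls4_rx_unsol changes above switch s_id to little-endian field ordering while d_id remains three raw bytes assembled in the PURLS handler. The short sketch below builds a 24-bit FC port ID (domain.area.al_pa) from either byte order; the helper names are made up for illustration and are not driver code.

#include <stdio.h>
#include <stdint.h>

/* Wire order: domain first. */
static uint32_t portid_from_be_bytes(const uint8_t b[3])
{
	return (uint32_t)b[0] << 16 | (uint32_t)b[1] << 8 | b[2];
}

/* Reversed order: al_pa first, as in the d_id[] assembly above. */
static uint32_t portid_from_le_bytes(const uint8_t b[3])
{
	return (uint32_t)b[2] << 16 | (uint32_t)b[1] << 8 | b[0];
}

int main(void)
{
	const uint8_t bytes[3] = { 0x01, 0x02, 0x03 };

	printf("domain-first bytes -> %06x\n", (unsigned)portid_from_be_bytes(bytes));
	printf("al_pa-first bytes  -> %06x\n", (unsigned)portid_from_le_bytes(bytes));
	return 0;
}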
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6dc80c8ddf79..5d1bdc15b75c 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -857,7 +857,9 @@ struct fcp_cmnd {
uint8_t task_attribute;
uint8_t task_management;
uint8_t additional_cdb_len;
- uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
+#define QLA_CDB_BUF_SIZE 256
+#define QLA_FCP_DL_SIZE 4
+ uint8_t cdb[QLA_CDB_BUF_SIZE + QLA_FCP_DL_SIZE]; /* 256 for CDB len and 4 for FCP_DL */
};
struct dsd_dma {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 877e4f446709..50db08265c51 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -44,10 +44,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");
-int ql2xenforce_iocb_limit = 1;
+int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
- "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
+ "1: track usage per queue, 2: track usage per adapter");
/*
* CT6 CTX allocation cache
@@ -432,6 +433,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
ha->base_qpair->srb_mempool = ha->srb_mempool;
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
+ INIT_LIST_HEAD(&ha->base_qpair->dsd_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
qla_cpu_update(rsp->qpair, raw_smp_processor_id());
@@ -750,9 +752,9 @@ void qla2x00_sp_free_dma(srb_t *sp)
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
- list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
+ sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
+ sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
}
if (sp->flags & SRB_GOT_BUF)
@@ -836,9 +838,9 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
- list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
+ sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
+ sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
}
@@ -1488,8 +1490,9 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
- sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24,
+ cmd->device->lun,
+ WAIT_LUN) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
@@ -1555,8 +1558,8 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
- 0, WAIT_TARGET) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0,
+ WAIT_TARGET) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
@@ -3006,9 +3009,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_exchg = FW_MAX_EXCHANGES_CNT;
atomic_set(&ha->num_pend_mbx_stage1, 0);
atomic_set(&ha->num_pend_mbx_stage2, 0);
- atomic_set(&ha->num_pend_mbx_stage3, 0);
atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
+ INIT_LIST_HEAD(&ha->tmf_pending);
+ INIT_LIST_HEAD(&ha->tmf_active);
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3285,6 +3289,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
+
+ if (ql2xenabledif && ql2xenabledif != 2) {
+ ql_log(ql_log_warn, base_vha, 0x302d,
+ "Invalid value for ql2xenabledif, resetting it to default (2)\n");
+ ql2xenabledif = 2;
+ }
+
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
@@ -3521,8 +3532,6 @@ skip_dpc:
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
if (ql2xprotmask)
scsi_host_set_prot(host, ql2xprotmask);
else
@@ -4399,7 +4408,6 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"sf_init_cb=%p.\n", ha->sf_init_cb);
}
- INIT_LIST_HEAD(&ha->gbl_dsd_list);
/* Get consistent memory allocated for Async Port-Database. */
if (!IS_FWI2_CAPABLE(ha)) {
@@ -4454,8 +4462,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
- ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
-
+ ha->elsrej.size,
+ &ha->elsrej.cdma,
+ GFP_KERNEL);
if (!ha->elsrej.c) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
"Alloc failed for els reject cmd.\n");
@@ -4464,8 +4473,21 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->elsrej.c->er_cmd = ELS_LS_RJT;
ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
+
+ ha->lsrjt.size = sizeof(struct fcnvme_ls_rjt);
+ ha->lsrjt.c = dma_alloc_coherent(&ha->pdev->dev, ha->lsrjt.size,
+ &ha->lsrjt.cdma, GFP_KERNEL);
+ if (!ha->lsrjt.c) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
+ "Alloc failed for nvme fc reject cmd.\n");
+ goto fail_lsrjt;
+ }
+
return 0;
+fail_lsrjt:
+ dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
+ ha->elsrej.c, ha->elsrej.cdma);
fail_elsrej:
dma_pool_destroy(ha->purex_dma_pool);
fail_flt:
@@ -4931,18 +4953,16 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
- if (IS_QLA82XX(ha)) {
- if (!list_empty(&ha->gbl_dsd_list)) {
- struct dsd_dma *dsd_ptr, *tdsd_ptr;
-
- /* clean up allocated prev pool */
- list_for_each_entry_safe(dsd_ptr,
- tdsd_ptr, &ha->gbl_dsd_list, list) {
- dma_pool_free(ha->dl_dma_pool,
- dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
- list_del(&dsd_ptr->list);
- kfree(dsd_ptr);
- }
+ if (!list_empty(&ha->base_qpair->dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &ha->base_qpair->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
}
}
@@ -4997,6 +5017,12 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->elsrej.c = NULL;
}
+ if (ha->lsrjt.c) {
+ dma_free_coherent(&ha->pdev->dev, ha->lsrjt.size, ha->lsrjt.c,
+ ha->lsrjt.cdma);
+ ha->lsrjt.c = NULL;
+ }
+
ha->init_cb = NULL;
ha->init_cb_dma = 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 5258b07687a9..2ef2dbac0db2 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1068,10 +1068,6 @@ void qlt_free_session_done(struct work_struct *work)
(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
}
- spin_lock_irqsave(&vha->work_lock, flags);
- sess->flags &= ~FCF_ASYNC_SENT;
- spin_unlock_irqrestore(&vha->work_lock, flags);
-
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->se_sess) {
sess->se_sess = NULL;
@@ -1081,7 +1077,6 @@ void qlt_free_session_done(struct work_struct *work)
qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
- sess->deleted = QLA_SESS_DELETED;
if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
vha->fcport_count--;
@@ -1133,10 +1128,15 @@ void qlt_free_session_done(struct work_struct *work)
sess->explicit_logout = 0;
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- sess->free_pending = 0;
qla2x00_dfs_remove_rport(vha, sess);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ sess->flags &= ~FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETED;
+ sess->free_pending = 0;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
sess, sess->port_name, vha->fcport_count);
@@ -1185,12 +1185,12 @@ void qlt_unreg_sess(struct fc_port *sess)
* management from being sent.
*/
sess->flags |= FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
@@ -4425,8 +4425,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
} else if (ha->msix_count) {
if (cmd->atio.u.isp24.fcp_cmnd.rddata)
- queue_work_on(smp_processor_id(), qla_tgt_wq,
- &cmd->work);
+ queue_work(qla_tgt_wq, &cmd->work);
else
queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
&cmd->work);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e3771923b0d7..d903563e969e 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.08.400-k"
+#define QLA2XXX_VERSION "10.02.09.100-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 8
-#define QLA_DRIVER_BETA_VER 400
+#define QLA_DRIVER_PATCH_VER 9
+#define QLA_DRIVER_BETA_VER 100
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3b5ba4b47b3b..68a0e6a2fb6e 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -310,7 +310,7 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
cmd->trc_flags |= TRC_CMD_DONE;
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
- queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
/*
@@ -547,7 +547,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
cmd->trc_flags |= TRC_DATA_IN;
cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
- queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index b2a3988e1e15..675332e49a7b 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -968,6 +968,11 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
memset(&chap_rec, 0, sizeof(chap_rec));
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*param_info)) {
+ rc = -EINVAL;
+ goto exit_set_chap;
+ }
+
param_info = nla_data(attr);
switch (param_info->param) {
@@ -2750,6 +2755,11 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
}
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*iface_param)) {
+ rval = -EINVAL;
+ goto exit_init_fw_cb;
+ }
+
iface_param = nla_data(attr);
if (iface_param->param_type == ISCSI_NET_PARAM) {
@@ -8104,6 +8114,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*fnode_param)) {
+ rc = -EINVAL;
+ goto exit_set_param;
+ }
+
fnode_param = nla_data(attr);
switch (fnode_param->param) {
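The qla4xxx changes above validate nla_len() against the expected payload size before dereferencing nla_data(). Here is a stand-alone sketch of that length check, using made-up attribute and parameter structures in place of the netlink types; the real code walks attributes with nla_for_each_attr().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for a netlink attribute and its payload. */
struct attr {
	uint16_t len;			/* payload length */
	unsigned char data[64];		/* payload bytes */
};

struct param_info {
	uint32_t param;
	uint32_t value;
};

/*
 * Refuse to interpret the payload as a parameter structure unless the
 * attribute actually carries that many bytes (the driver returns
 * -EINVAL in that case).
 */
static int get_param(const struct attr *a, struct param_info *out)
{
	if (a->len < sizeof(*out))
		return -1;
	memcpy(out, a->data, sizeof(*out));
	return 0;
}

int main(void)
{
	struct attr short_attr = { .len = 2 };
	struct attr sized_attr = { .len = sizeof(struct param_info) };
	struct param_info p;

	printf("short attribute: %s\n", get_param(&short_attr, &p) ? "rejected" : "accepted");
	printf("sized attribute: %s\n", get_param(&sized_attr, &p) ? "rejected" : "accepted");
	return 0;
}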
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 1e8fbd457248..3b95f7a6216f 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -28,7 +28,7 @@
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/pgtable.h>
@@ -843,7 +843,7 @@ static int qpti_map_queues(struct qlogicpti *qpti)
return 0;
}
-const char *qlogicpti_info(struct Scsi_Host *host)
+static const char *qlogicpti_info(struct Scsi_Host *host)
{
static char buf[80];
struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d0911bc28663..89367c4bf0ef 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -613,6 +613,17 @@ void scsi_cdl_check(struct scsi_device *sdev)
bool cdl_supported;
unsigned char *buf;
+ /*
+ * Support for CDL was defined in SPC-5. Ignore devices reporting a
+ * lower SPC version. This also avoids problems with old drives choking
+ * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a
+ * service action specified, as done in scsi_cdl_check_cmd().
+ */
+ if (sdev->scsi_level < SCSI_SPC_5) {
+ sdev->cdl_supported = 0;
+ return;
+ }
+
buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL);
if (!buf) {
sdev->cdl_supported = 0;
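The scsi_cdl_check() gate above keys off sdev->scsi_level, which scsi_scan.c derives from the INQUIRY version byte (now masked with 0x0f) plus one. A simplified worked example of that mapping follows, ignoring the SCSI-1/CCS special case; the SPC-5 level value is assumed here for illustration, where the kernel uses the SCSI_SPC_5 constant.

#include <stdio.h>

#define EXAMPLE_SPC_5_LEVEL 8	/* assumed: INQUIRY version 0x07 + 1 */

/* Simplified scsi_probe_lun() logic for the version-to-level mapping. */
static int scsi_level_from_inquiry(unsigned char byte2)
{
	int version = byte2 & 0x0f;	/* was masked with 0x07 before this patch */

	return version >= 2 ? version + 1 : version;
}

int main(void)
{
	int level = scsi_level_from_inquiry(0x07);	/* SPC-5 device */

	printf("scsi_level = %d, CDL probing %s\n", level,
	       level < EXAMPLE_SPC_5_LEVEL ? "skipped" : "allowed");
	return 0;
}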
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 217b70c678c3..f795848b316c 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/seq_file.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
#include "scsi_debugfs.h"
#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name
@@ -33,14 +34,33 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags,
void scsi_show_rq(struct seq_file *m, struct request *rq)
{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq), *cmd2;
+ struct Scsi_Host *shost = cmd->device->host;
int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
int timeout_ms = jiffies_to_msecs(rq->timeout);
+ const char *list_info = NULL;
char buf[80] = "(?)";
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(cmd2, &shost->eh_abort_list, eh_entry) {
+ if (cmd == cmd2) {
+ list_info = "on eh_abort_list";
+ goto unlock;
+ }
+ }
+ list_for_each_entry(cmd2, &shost->eh_cmd_q, eh_entry) {
+ if (cmd == cmd2) {
+ list_info = "on eh_cmd_q";
+ goto unlock;
+ }
+ }
+unlock:
+ spin_unlock_irq(shost->host_lock);
+
__scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
- seq_printf(m, ", .cmd=%s, .retries=%d, .result = %#x, .flags=", buf,
- cmd->retries, cmd->result);
+ seq_printf(m, ", .cmd=%s, .retries=%d, .allowed=%d, .result = %#x, %s%s.flags=",
+ buf, cmd->retries, cmd->allowed, cmd->result,
+ list_info ? : "", list_info ? ", " : "");
scsi_flags_show(m, cmd->flags, scsi_cmd_flags,
ARRAY_SIZE(scsi_cmd_flags));
seq_printf(m, ", .timeout=%d.%03d, allocated %d.%03d s ago",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 59176946ab56..c2f647a7c1b0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2454,7 +2454,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
envp[idx++] = "SDEV_MEDIA_CHANGE=1";
break;
case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
break;
case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f42388ecb024..3f0dfb97db6b 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -103,7 +103,6 @@ bool scsi_noretry_cmd(struct scsi_cmnd *scmd);
void scsi_eh_done(struct scsi_cmnd *scmd);
/* scsi_lib.c */
-extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
@@ -138,7 +137,6 @@ extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, u64, enum scsi_scan_mode);
extern void scsi_forget_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct device *);
/* scsi_sysctl.c */
#ifdef CONFIG_SYSCTL
@@ -155,7 +153,6 @@ extern int scsi_sysfs_add_host(struct Scsi_Host *);
extern int scsi_sysfs_register(void);
extern void scsi_sysfs_unregister(void);
extern void scsi_sysfs_device_initialize(struct scsi_device *);
-extern int scsi_sysfs_target_initialize(struct scsi_device *);
extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index aa13feb17c62..902655d75947 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -822,7 +822,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
* non-zero LUNs can be scanned.
*/
- sdev->scsi_level = inq_result[2] & 0x07;
+ sdev->scsi_level = inq_result[2] & 0x0f;
if (sdev->scsi_level >= 2 ||
(sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
sdev->scsi_level++;
@@ -1619,12 +1619,24 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
}
EXPORT_SYMBOL(scsi_add_device);
-void scsi_rescan_device(struct device *dev)
+int scsi_rescan_device(struct scsi_device *sdev)
{
- struct scsi_device *sdev = to_scsi_device(dev);
+ struct device *dev = &sdev->sdev_gendev;
+ int ret = 0;
device_lock(dev);
+ /*
+ * Bail out if the device is not running. Otherwise, the rescan may
+ * block waiting for commands to be executed, with us holding the
+ * device lock. This can result in a potential deadlock in the power
+ * management core code while system resume is ongoing.
+ */
+ if (sdev->sdev_state != SDEV_RUNNING) {
+ ret = -EWOULDBLOCK;
+ goto unlock;
+ }
+
scsi_attach_vpd(sdev);
scsi_cdl_check(sdev);
@@ -1638,7 +1650,11 @@ void scsi_rescan_device(struct device *dev)
drv->rescan(dev);
module_put(dev->driver->owner);
}
+
+unlock:
device_unlock(dev);
+
+ return ret;
}
EXPORT_SYMBOL(scsi_rescan_device);
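scsi_rescan_device() now takes the device lock, bails out with -EWOULDBLOCK when the device is not in SDEV_RUNNING, and reports the result to its callers. The user-space sketch below shows that check-under-lock shape with pthread primitives standing in for device_lock() and an illustrative state enum; it is not the kernel implementation.

#include <pthread.h>
#include <stdio.h>

enum dev_state { DEV_STOPPED, DEV_RUNNING };

struct dev {
	pthread_mutex_t lock;
	enum dev_state state;
};

/* Take the lock, bail out early if the device cannot make progress,
 * otherwise do the potentially blocking work under the lock. */
static int rescan(struct dev *d)
{
	int ret = 0;

	pthread_mutex_lock(&d->lock);
	if (d->state != DEV_RUNNING) {
		ret = -1;	/* -EWOULDBLOCK in the kernel */
		goto unlock;
	}
	/* ... re-read VPD pages, check CDL, call the driver rescan ... */
unlock:
	pthread_mutex_unlock(&d->lock);
	return ret;
}

int main(void)
{
	struct dev d = { .lock = PTHREAD_MUTEX_INITIALIZER, .state = DEV_STOPPED };

	printf("stopped: rescan() -> %d\n", rescan(&d));
	d.state = DEV_RUNNING;
	printf("running: rescan() -> %d\n", rescan(&d));
	return 0;
}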
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 60317676e45f..24f6eefb6803 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -747,7 +747,7 @@ static ssize_t
store_rescan_field (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- scsi_rescan_device(dev);
+ scsi_rescan_device(to_scsi_device(dev));
return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
@@ -840,7 +840,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
* waiting for pending I/O to finish.
*/
blk_mq_run_hw_queues(sdev->request_queue, true);
- scsi_rescan_device(dev);
+ scsi_rescan_device(sdev);
}
return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e527ece12453..3075b2ddf7a6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3014,14 +3014,15 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
}
static int
-iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
{
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
int err = 0, value = 0, state;
- if (ev->u.set_param.len > PAGE_SIZE)
+ if (ev->u.set_param.len > rlen ||
+ ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
session = iscsi_session_lookup(ev->u.set_param.sid);
@@ -3029,6 +3030,10 @@ iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
if (!conn || !session)
return -EINVAL;
+ /* data is treated as a NUL-terminated string, so validate its length */
+ if (strlen(data) > ev->u.set_param.len)
+ return -EINVAL;
+
switch (ev->u.set_param.param) {
case ISCSI_PARAM_SESS_RECOVERY_TMO:
sscanf(data, "%d", &value);
@@ -3118,7 +3123,7 @@ put_ep:
static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
- struct iscsi_uevent *ev, int msg_type)
+ struct iscsi_uevent *ev, int msg_type, u32 rlen)
{
struct iscsi_endpoint *ep;
int rc = 0;
@@ -3126,7 +3131,10 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
switch (msg_type) {
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
- rc = iscsi_if_ep_connect(transport, ev, msg_type);
+ if (rlen < sizeof(struct sockaddr))
+ rc = -EINVAL;
+ else
+ rc = iscsi_if_ep_connect(transport, ev, msg_type);
break;
case ISCSI_UEVENT_TRANSPORT_EP_POLL:
if (!transport->ep_poll)
@@ -3150,12 +3158,15 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
static int
iscsi_tgt_dscvr(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+ struct iscsi_uevent *ev, u32 rlen)
{
struct Scsi_Host *shost;
struct sockaddr *dst_addr;
int err;
+ if (rlen < sizeof(*dst_addr))
+ return -EINVAL;
+
if (!transport->tgt_dscvr)
return -EINVAL;
@@ -3176,7 +3187,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
static int
iscsi_set_host_param(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+ struct iscsi_uevent *ev, u32 rlen)
{
char *data = (char*)ev + sizeof(*ev);
struct Scsi_Host *shost;
@@ -3185,7 +3196,8 @@ iscsi_set_host_param(struct iscsi_transport *transport,
if (!transport->set_host_param)
return -ENOSYS;
- if (ev->u.set_host_param.len > PAGE_SIZE)
+ if (ev->u.set_host_param.len > rlen ||
+ ev->u.set_host_param.len > PAGE_SIZE)
return -EINVAL;
shost = scsi_host_lookup(ev->u.set_host_param.host_no);
@@ -3195,6 +3207,10 @@ iscsi_set_host_param(struct iscsi_transport *transport,
return -ENODEV;
}
+ /* see similar check in iscsi_if_set_param() */
+ if (strlen(data) > ev->u.set_host_param.len)
+ return -EINVAL;
+
err = transport->set_host_param(shost, ev->u.set_host_param.param,
data, ev->u.set_host_param.len);
scsi_host_put(shost);
@@ -3202,12 +3218,15 @@ iscsi_set_host_param(struct iscsi_transport *transport,
}
static int
-iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
{
struct Scsi_Host *shost;
struct iscsi_path *params;
int err;
+ if (rlen < sizeof(*params))
+ return -EINVAL;
+
if (!transport->set_path)
return -ENOSYS;
@@ -3267,12 +3286,15 @@ iscsi_set_iface_params(struct iscsi_transport *transport,
}
static int
-iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
{
struct Scsi_Host *shost;
struct sockaddr *dst_addr;
int err;
+ if (rlen < sizeof(*dst_addr))
+ return -EINVAL;
+
if (!transport->send_ping)
return -ENOSYS;
@@ -3770,13 +3792,12 @@ exit_host_stats:
}
static int iscsi_if_transport_conn(struct iscsi_transport *transport,
- struct nlmsghdr *nlh)
+ struct nlmsghdr *nlh, u32 pdu_len)
{
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_cls_session *session;
struct iscsi_cls_conn *conn = NULL;
struct iscsi_endpoint *ep;
- uint32_t pdu_len;
int err = 0;
switch (nlh->nlmsg_type) {
@@ -3861,8 +3882,6 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
break;
case ISCSI_UEVENT_SEND_PDU:
- pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
-
if ((ev->u.send_pdu.hdr_size > pdu_len) ||
(ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
err = -EINVAL;
@@ -3892,6 +3911,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
struct iscsi_endpoint *ep = NULL;
+ u32 rlen;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
@@ -3911,6 +3931,13 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
portid = NETLINK_CB(skb).portid;
+ /*
+ * Even though the remaining payload may not actually be netlink
+ * attributes (it may be an address or other raw data), calculate the
+ * remaining length here to simplify the per-command length checks
+ * below.
+ */
+ rlen = nlmsg_attrlen(nlh, sizeof(*ev));
+
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
@@ -3967,7 +3994,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = -EINVAL;
break;
case ISCSI_UEVENT_SET_PARAM:
- err = iscsi_if_set_param(transport, ev);
+ err = iscsi_if_set_param(transport, ev, rlen);
break;
case ISCSI_UEVENT_CREATE_CONN:
case ISCSI_UEVENT_DESTROY_CONN:
@@ -3975,7 +4002,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_START_CONN:
case ISCSI_UEVENT_BIND_CONN:
case ISCSI_UEVENT_SEND_PDU:
- err = iscsi_if_transport_conn(transport, nlh);
+ err = iscsi_if_transport_conn(transport, nlh, rlen);
break;
case ISCSI_UEVENT_GET_STATS:
err = iscsi_if_get_stats(transport, nlh);
@@ -3984,23 +4011,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_TRANSPORT_EP_POLL:
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
- err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
+ err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type, rlen);
break;
case ISCSI_UEVENT_TGT_DSCVR:
- err = iscsi_tgt_dscvr(transport, ev);
+ err = iscsi_tgt_dscvr(transport, ev, rlen);
break;
case ISCSI_UEVENT_SET_HOST_PARAM:
- err = iscsi_set_host_param(transport, ev);
+ err = iscsi_set_host_param(transport, ev, rlen);
break;
case ISCSI_UEVENT_PATH_UPDATE:
- err = iscsi_set_path(transport, ev);
+ err = iscsi_set_path(transport, ev, rlen);
break;
case ISCSI_UEVENT_SET_IFACE_PARAMS:
- err = iscsi_set_iface_params(transport, ev,
- nlmsg_attrlen(nlh, sizeof(*ev)));
+ err = iscsi_set_iface_params(transport, ev, rlen);
break;
case ISCSI_UEVENT_PING:
- err = iscsi_send_ping(transport, ev);
+ err = iscsi_send_ping(transport, ev, rlen);
break;
case ISCSI_UEVENT_GET_CHAP:
err = iscsi_get_chap(transport, nlh);
@@ -4009,13 +4035,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = iscsi_delete_chap(transport, ev);
break;
case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
- err = iscsi_set_flashnode_param(transport, ev,
- nlmsg_attrlen(nlh,
- sizeof(*ev)));
+ err = iscsi_set_flashnode_param(transport, ev, rlen);
break;
case ISCSI_UEVENT_NEW_FLASHNODE:
- err = iscsi_new_flashnode(transport, ev,
- nlmsg_attrlen(nlh, sizeof(*ev)));
+ err = iscsi_new_flashnode(transport, ev, rlen);
break;
case ISCSI_UEVENT_DEL_FLASHNODE:
err = iscsi_del_flashnode(transport, ev);
@@ -4030,8 +4053,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = iscsi_logout_flashnode_sid(transport, ev);
break;
case ISCSI_UEVENT_SET_CHAP:
- err = iscsi_set_chap(transport, ev,
- nlmsg_attrlen(nlh, sizeof(*ev)));
+ err = iscsi_set_chap(transport, ev, rlen);
break;
case ISCSI_UEVENT_GET_HOST_STATS:
err = iscsi_get_host_stats(transport, nlh);
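
For reference, a minimal sketch (not from the patch; the function name is hypothetical) of the validation pattern the rlen plumbing enables: each handler can bound the caller-declared payload length by the bytes that actually arrived in the netlink message before treating the payload as a string or a fixed-size structure. It mirrors the checks added to iscsi_if_set_param().

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <scsi/iscsi_if.h>

/* illustrative only */
static int example_check_param(struct iscsi_uevent *ev, u32 rlen)
{
	char *data = (char *)ev + sizeof(*ev);

	/* the declared length must fit both the received payload and a page */
	if (ev->u.set_param.len > rlen || ev->u.set_param.len > PAGE_SIZE)
		return -EINVAL;

	/* the payload is parsed as a string, so bound its length as well */
	if (strlen(data) > ev->u.set_param.len)
		return -EINVAL;

	return 0;
}
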
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3c668cfb146d..83b6a3f3863b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -104,19 +104,7 @@ static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
-static int sd_probe(struct device *);
-static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
-static int sd_suspend_system(struct device *);
-static int sd_suspend_runtime(struct device *);
-static int sd_resume_system(struct device *);
-static int sd_resume_runtime(struct device *);
-static void sd_rescan(struct device *);
-static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
-static void sd_uninit_command(struct scsi_cmnd *SCpnt);
-static int sd_done(struct scsi_cmnd *);
-static void sd_eh_reset(struct scsi_cmnd *);
-static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
@@ -213,18 +201,63 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-manage_start_stop_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+manage_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
- return sprintf(buf, "%u\n", sdp->manage_start_stop);
+ return sysfs_emit(buf, "%u\n",
+ sdp->manage_system_start_stop &&
+ sdp->manage_runtime_start_stop);
}
+static DEVICE_ATTR_RO(manage_start_stop);
static ssize_t
-manage_start_stop_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+manage_system_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop);
+}
+
+static ssize_t
+manage_system_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ bool v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_system_start_stop = v;
+
+ return count;
+}
+static DEVICE_ATTR_RW(manage_system_start_stop);
+
+static ssize_t
+manage_runtime_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop);
+}
+
+static ssize_t
+manage_runtime_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -236,11 +269,11 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
if (kstrtobool(buf, &v))
return -EINVAL;
- sdp->manage_start_stop = v;
+ sdp->manage_runtime_start_stop = v;
return count;
}
-static DEVICE_ATTR_RW(manage_start_stop);
+static DEVICE_ATTR_RW(manage_runtime_start_stop);
static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -572,6 +605,8 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_FUA.attr,
&dev_attr_allow_restart.attr,
&dev_attr_manage_start_stop.attr,
+ &dev_attr_manage_system_start_stop.attr,
+ &dev_attr_manage_runtime_start_stop.attr,
&dev_attr_protection_type.attr,
&dev_attr_protection_mode.attr,
&dev_attr_app_tag_own.attr,
@@ -592,33 +627,6 @@ static struct class sd_disk_class = {
.dev_groups = sd_disk_groups,
};
-static const struct dev_pm_ops sd_pm_ops = {
- .suspend = sd_suspend_system,
- .resume = sd_resume_system,
- .poweroff = sd_suspend_system,
- .restore = sd_resume_system,
- .runtime_suspend = sd_suspend_runtime,
- .runtime_resume = sd_resume_runtime,
-};
-
-static struct scsi_driver sd_template = {
- .gendrv = {
- .name = "sd",
- .owner = THIS_MODULE,
- .probe = sd_probe,
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .remove = sd_remove,
- .shutdown = sd_shutdown,
- .pm = &sd_pm_ops,
- },
- .rescan = sd_rescan,
- .init_command = sd_init_command,
- .uninit_command = sd_uninit_command,
- .done = sd_done,
- .eh_action = sd_eh_action,
- .eh_reset = sd_eh_reset,
-};
-
/*
* Don't request a new module, as that could deadlock in multipath
* environment.
@@ -3733,7 +3741,8 @@ static int sd_remove(struct device *dev)
device_del(&sdkp->disk_dev);
del_gendisk(sdkp->disk);
- sd_shutdown(dev);
+ if (!sdkp->suspended)
+ sd_shutdown(dev);
put_disk(sdkp->disk);
return 0;
@@ -3810,13 +3819,20 @@ static void sd_shutdown(struct device *dev)
sd_sync_cache(sdkp, NULL);
}
- if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
+ if (system_state != SYSTEM_RESTART &&
+ sdkp->device->manage_system_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
}
-static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
+static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
+{
+ return (sdev->manage_system_start_stop && !runtime) ||
+ (sdev->manage_runtime_start_stop && runtime);
+}
+
+static int sd_suspend_common(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
struct scsi_sense_hdr sshdr;
@@ -3848,15 +3864,18 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
}
}
- if (sdkp->device->manage_start_stop) {
+ if (sd_do_start_stop(sdkp->device, runtime)) {
if (!sdkp->device->silence_suspend)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
/* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0);
- if (ignore_stop_errors)
+ if (!runtime)
ret = 0;
}
+ if (!ret)
+ sdkp->suspended = true;
+
return ret;
}
@@ -3865,15 +3884,15 @@ static int sd_suspend_system(struct device *dev)
if (pm_runtime_suspended(dev))
return 0;
- return sd_suspend_common(dev, true);
+ return sd_suspend_common(dev, false);
}
static int sd_suspend_runtime(struct device *dev)
{
- return sd_suspend_common(dev, false);
+ return sd_suspend_common(dev, true);
}
-static int sd_resume(struct device *dev)
+static int sd_resume(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret = 0;
@@ -3881,16 +3900,21 @@ static int sd_resume(struct device *dev)
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;
- if (!sdkp->device->manage_start_stop)
+ if (!sd_do_start_stop(sdkp->device, runtime)) {
+ sdkp->suspended = false;
return 0;
+ }
if (!sdkp->device->no_start_on_resume) {
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
}
- if (!ret)
+ if (!ret) {
opal_unlock_from_suspend(sdkp->opal_dev);
+ sdkp->suspended = false;
+ }
+
return ret;
}
@@ -3899,7 +3923,7 @@ static int sd_resume_system(struct device *dev)
if (pm_runtime_suspended(dev))
return 0;
- return sd_resume(dev);
+ return sd_resume(dev, false);
}
static int sd_resume_runtime(struct device *dev)
@@ -3926,9 +3950,36 @@ static int sd_resume_runtime(struct device *dev)
"Failed to clear sense data\n");
}
- return sd_resume(dev);
+ return sd_resume(dev, true);
}
+static const struct dev_pm_ops sd_pm_ops = {
+ .suspend = sd_suspend_system,
+ .resume = sd_resume_system,
+ .poweroff = sd_suspend_system,
+ .restore = sd_resume_system,
+ .runtime_suspend = sd_suspend_runtime,
+ .runtime_resume = sd_resume_runtime,
+};
+
+static struct scsi_driver sd_template = {
+ .gendrv = {
+ .name = "sd",
+ .owner = THIS_MODULE,
+ .probe = sd_probe,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .remove = sd_remove,
+ .shutdown = sd_shutdown,
+ .pm = &sd_pm_ops,
+ },
+ .rescan = sd_rescan,
+ .init_command = sd_init_command,
+ .uninit_command = sd_uninit_command,
+ .done = sd_done,
+ .eh_action = sd_eh_action,
+ .eh_reset = sd_eh_reset,
+};
+
/**
* init_sd - entry point for this driver (both when built in or when
* a module).
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5eea762f84d1..409dda5350d1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -131,6 +131,7 @@ struct scsi_disk {
u8 provisioning_mode;
u8 zeroing_mode;
u8 nr_actuators; /* Number of actuators */
+ bool suspended; /* Disk is suspended (stopped) */
unsigned ATO : 1; /* state of disk ATO bit */
unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e392eaf5b2bf..041940183516 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -710,7 +710,7 @@ typedef u32 pqi_index_t;
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
-#define SOP_RC_INCORRECT_LOGICAL_UNIT 0x9
+#define SOP_TMF_INCORRECT_LOGICAL_UNIT 0x9
/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
@@ -1085,7 +1085,16 @@ struct pqi_stream_data {
u32 last_accessed;
};
-#define PQI_MAX_LUNS_PER_DEVICE 256
+#define PQI_MAX_LUNS_PER_DEVICE 256
+
+struct pqi_tmf_work {
+ struct work_struct work_struct;
+ struct scsi_cmnd *scmd;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ u8 lun;
+ u8 scsi_opcode;
+};
struct pqi_scsi_dev {
int devtype; /* as reported by INQUIRY command */
@@ -1111,6 +1120,7 @@ struct pqi_scsi_dev {
u8 erase_in_progress : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_remove;
+ bool in_reset[PQI_MAX_LUNS_PER_DEVICE];
bool device_offline;
u8 vendor[8]; /* bytes 8-15 of inquiry data */
u8 model[16]; /* bytes 16-31 of inquiry data */
@@ -1149,6 +1159,8 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
unsigned int raid_bypass_cnt;
+
+ struct pqi_tmf_work tmf_work[PQI_MAX_LUNS_PER_DEVICE];
};
/* VPD inquiry pages */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 6aaaa7ebca37..9a58df9312fa 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.22-040"
+#define DRIVER_VERSION "2.1.24-046"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 22
-#define DRIVER_REVISION 40
+#define DRIVER_RELEASE 24
+#define DRIVER_REVISION 46
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -48,6 +48,8 @@
#define PQI_POST_RESET_DELAY_SECS 5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
+#define PQI_NO_COMPLETION ((void *)-1)
+
MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
DRIVER_VERSION);
@@ -96,6 +98,7 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
+static void pqi_tmf_worker(struct work_struct *work);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -455,6 +458,21 @@ static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
return device->in_remove;
}
+static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
+{
+ device->in_reset[lun] = true;
+}
+
+static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
+{
+ device->in_reset[lun] = false;
+}
+
+static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
+{
+ return device->in_reset[lun];
+}
+
static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
int index;
@@ -2137,6 +2155,15 @@ static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
return device->sdev != NULL;
}
+static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
+{
+ unsigned int lun;
+ struct pqi_tmf_work *tmf_work;
+
+ for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
+ INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
+}
+
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
@@ -2217,6 +2244,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
list_add_tail(&device->add_list_entry, &add_list);
/* To prevent this device structure from being freed later. */
device->keep_device = true;
+ pqi_init_device_tmf_work(device);
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -2257,7 +2285,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
if (device->rescan) {
- scsi_rescan_device(&device->sdev->sdev_gendev);
+ scsi_rescan_device(device->sdev);
device->rescan = false;
}
}
@@ -3330,7 +3358,7 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
- case SOP_RC_INCORRECT_LOGICAL_UNIT:
+ case SOP_TMF_INCORRECT_LOGICAL_UNIT:
rc = -ENODEV;
break;
default:
@@ -5628,7 +5656,6 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
- struct pqi_scsi_dev *device;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
@@ -5648,9 +5675,8 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
- device = scmd->device->hostdata;
- if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
- put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
+ if (!raid_bypass && ctrl_info->multi_lun_device_supported)
+ put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;
@@ -5850,6 +5876,7 @@ static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
struct pqi_scsi_dev *device;
+ struct completion *wait;
if (!scmd->device) {
set_host_byte(scmd, DID_NO_CONNECT);
@@ -5863,6 +5890,10 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
}
atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
+
+ wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
+ if (wait != PQI_NO_COMPLETION)
+ complete(wait);
}
static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
@@ -5948,6 +5979,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
u16 hw_queue;
struct pqi_queue_group *queue_group;
bool raid_bypassed;
+ u8 lun;
+
+ scmd->host_scribble = PQI_NO_COMPLETION;
device = scmd->device->hostdata;
@@ -5957,7 +5991,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
+ lun = (u8)scmd->device->lun;
+
+ atomic_inc(&device->scsi_cmds_outstanding[lun]);
ctrl_info = shost_to_hba(shost);
@@ -5967,7 +6003,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- if (pqi_ctrl_blocked(ctrl_info)) {
+ if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -6002,8 +6038,10 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
}
out:
- if (rc)
- atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
+ if (rc) {
+ scmd->host_scribble = NULL;
+ atomic_dec(&device->scsi_cmds_outstanding[lun]);
+ }
return rc;
}
@@ -6097,7 +6135,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
}
static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+ struct pqi_scsi_dev *device, u8 lun)
{
unsigned int i;
unsigned int path;
@@ -6127,6 +6165,9 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
if (scsi_device != device)
continue;
+ if ((u8)scmd->device->lun != lun)
+ continue;
+
list_del(&io_request->request_list_entry);
set_host_byte(scmd, DID_RESET);
pqi_free_io_request(io_request);
@@ -6224,15 +6265,13 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
- struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info, NULL);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -6247,7 +6286,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
- request->ml_device_lun_number = (u8)scmd->device->lun;
+ request->ml_device_lun_number = lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
@@ -6255,7 +6294,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
if (rc == 0)
rc = io_request->status;
@@ -6269,18 +6308,16 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int reset_rc;
int wait_rc;
unsigned int retries;
unsigned long timeout_msecs;
- struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
for (retries = 0;;) {
- reset_rc = pqi_lun_reset(ctrl_info, scmd);
- if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, device, lun);
+ if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -6288,60 +6325,51 @@ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct sc
timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
if (wait_rc && reset_rc == 0)
reset_rc = wait_rc;
return reset_rc == 0 ? SUCCESS : FAILED;
}
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
- struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_fail_io_queued_for_device(ctrl_info, device);
+ pqi_fail_io_queued_for_device(ctrl_info, device, lun);
rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
+ pqi_device_reset_start(device, lun);
+ pqi_ctrl_unblock_requests(ctrl_info);
if (rc)
rc = FAILED;
else
- rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
- pqi_ctrl_unblock_requests(ctrl_info);
+ rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
+ pqi_device_reset_done(device, lun);
return rc;
}
-static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
+static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
{
int rc;
- struct Scsi_Host *shost;
- struct pqi_ctrl_info *ctrl_info;
- struct pqi_scsi_dev *device;
-
- shost = scmd->device->host;
- ctrl_info = shost_to_hba(shost);
- device = scmd->device->hostdata;
mutex_lock(&ctrl_info->lun_reset_mutex);
dev_err(&ctrl_info->pci_dev->dev,
- "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
- shost->host_no,
- device->bus, device->target, (u32)scmd->device->lun,
- scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
+ "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
else
- rc = pqi_device_reset(ctrl_info, scmd);
+ rc = pqi_device_reset(ctrl_info, device, lun);
dev_err(&ctrl_info->pci_dev->dev,
- "reset of scsi %d:%d:%d:%d: %s\n",
- shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
+ "reset of scsi %d:%d:%d:%u: %s\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&ctrl_info->lun_reset_mutex);
@@ -6349,6 +6377,77 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
return rc;
}
+static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ u8 scsi_opcode;
+
+ shost = scmd->device->host;
+ ctrl_info = shost_to_hba(shost);
+ device = scmd->device->hostdata;
+ scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
+
+ return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
+}
+
+static void pqi_tmf_worker(struct work_struct *work)
+{
+ struct pqi_tmf_work *tmf_work;
+ struct scsi_cmnd *scmd;
+
+ tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
+ scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
+
+ pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
+}
+
+static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ struct pqi_tmf_work *tmf_work;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ shost = scmd->device->host;
+ ctrl_info = shost_to_hba(shost);
+ device = scmd->device->hostdata;
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+
+ if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+ scmd->result = DID_RESET << 16;
+ goto out;
+ }
+
+ tmf_work = &device->tmf_work[scmd->device->lun];
+
+ if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
+ tmf_work->ctrl_info = ctrl_info;
+ tmf_work->device = device;
+ tmf_work->lun = (u8)scmd->device->lun;
+ tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
+ schedule_work(&tmf_work->work_struct);
+ }
+
+ wait_for_completion(&wait);
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+
+out:
+
+ return SUCCESS;
+}
+
static int pqi_slave_alloc(struct scsi_device *sdev)
{
struct pqi_scsi_dev *device;
@@ -6470,21 +6569,21 @@ static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *ar
struct pci_dev *pci_dev;
u32 subsystem_vendor;
u32 subsystem_device;
- cciss_pci_info_struct pciinfo;
+ cciss_pci_info_struct pci_info;
if (!arg)
return -EINVAL;
pci_dev = ctrl_info->pci_dev;
- pciinfo.domain = pci_domain_nr(pci_dev->bus);
- pciinfo.bus = pci_dev->bus->number;
- pciinfo.dev_fn = pci_dev->devfn;
+ pci_info.domain = pci_domain_nr(pci_dev->bus);
+ pci_info.bus = pci_dev->bus->number;
+ pci_info.dev_fn = pci_dev->devfn;
subsystem_vendor = pci_dev->subsystem_vendor;
subsystem_device = pci_dev->subsystem_device;
- pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
+ pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
- if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
+ if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
return -EFAULT;
return 0;
@@ -7362,6 +7461,7 @@ static const struct scsi_host_template pqi_driver_template = {
.scan_finished = pqi_scan_finished,
.this_id = -1,
.eh_device_reset_handler = pqi_eh_device_reset_handler,
+ .eh_abort_handler = pqi_eh_abort_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
@@ -8904,6 +9004,52 @@ static void pqi_ctrl_offline_worker(struct work_struct *work)
pqi_take_ctrl_offline_deferred(ctrl_info);
}
+static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
+{
+ char *string;
+
+ switch (ctrl_shutdown_reason) {
+ case PQI_IQ_NOT_DRAINED_TIMEOUT:
+ string = "inbound queue not drained timeout";
+ break;
+ case PQI_LUN_RESET_TIMEOUT:
+ string = "LUN reset timeout";
+ break;
+ case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
+ string = "I/O pending timeout after LUN reset";
+ break;
+ case PQI_NO_HEARTBEAT:
+ string = "no controller heartbeat detected";
+ break;
+ case PQI_FIRMWARE_KERNEL_NOT_UP:
+ string = "firmware kernel not ready";
+ break;
+ case PQI_OFA_RESPONSE_TIMEOUT:
+ string = "OFA response timeout";
+ break;
+ case PQI_INVALID_REQ_ID:
+ string = "invalid request ID";
+ break;
+ case PQI_UNMATCHED_REQ_ID:
+ string = "unmatched request ID";
+ break;
+ case PQI_IO_PI_OUT_OF_RANGE:
+ string = "I/O queue producer index out of range";
+ break;
+ case PQI_EVENT_PI_OUT_OF_RANGE:
+ string = "event queue producer index out of range";
+ break;
+ case PQI_UNEXPECTED_IU_TYPE:
+ string = "unexpected IU type";
+ break;
+ default:
+ string = "unknown reason";
+ break;
+ }
+
+ return string;
+}
+
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
@@ -8916,7 +9062,9 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
if (!pqi_disable_ctrl_shutdown)
sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
pci_disable_device(ctrl_info->pci_dev);
- dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller offline: reason code 0x%x (%s)\n",
+ ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
schedule_work(&ctrl_info->ctrl_offline_work);
}
@@ -9062,7 +9210,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
rc = pqi_flush_cache(ctrl_info, shutdown_event);
if (rc)
dev_err(&pci_dev->dev,
- "unable to flush controller cache\n");
+ "unable to flush controller cache during shutdown\n");
pqi_crash_if_pending_command(ctrl_info);
pqi_reset(ctrl_info);
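
For reference, a self-contained sketch (illustrative only; the structure, sentinel, and function names are hypothetical stand-ins) of the claim/complete handshake the new abort handler relies on: the completion path and the abort path race on a single pointer, and whichever atomic operation wins decides who signals or waits on the completion.

#include <linux/atomic.h>
#include <linux/completion.h>

#define EXAMPLE_NO_WAITER ((void *)-1)

struct example_cmd {
	void *scribble;		/* EXAMPLE_NO_WAITER, a parked waiter, or NULL */
};

/* completion path: claim the slot; wake an abort waiter if one is parked */
static void example_cmd_done(struct example_cmd *cmd)
{
	struct completion *wait;

	wait = (struct completion *)xchg(&cmd->scribble, NULL);
	if (wait && wait != EXAMPLE_NO_WAITER)
		complete(wait);
}

/* abort path: park a waiter unless the command has already completed */
static void example_abort(struct example_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	if (cmpxchg(&cmd->scribble, EXAMPLE_NO_WAITER, (void *)&wait) != EXAMPLE_NO_WAITER)
		return;		/* already completed, nothing to wait for */

	wait_for_completion(&wait);
}
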
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 14d7981ddcdd..338aa8c42968 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -414,6 +414,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (cmdstatp->have_sense &&
cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
+ if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29)
+ STp->pos_unknown = 1; /* ASC => power on / reset */
STp->pos_unknown |= STp->device->was_reset;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 047ffaf7d42a..a95936b18f69 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -316,6 +316,9 @@ enum storvsc_request_type {
#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ERROR 0x04
#define SRB_STATUS_INVALID_REQUEST 0x06
+#define SRB_STATUS_TIMEOUT 0x09
+#define SRB_STATUS_SELECTION_TIMEOUT 0x0A
+#define SRB_STATUS_BUS_RESET 0x0E
#define SRB_STATUS_DATA_OVERRUN 0x12
#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS_INTERNAL_ERROR 0x30
@@ -472,7 +475,7 @@ static void storvsc_device_scan(struct work_struct *work)
sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
if (!sdev)
goto done;
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
done:
@@ -981,6 +984,10 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case SRB_STATUS_ABORTED:
case SRB_STATUS_INVALID_REQUEST:
case SRB_STATUS_INTERNAL_ERROR:
+ case SRB_STATUS_TIMEOUT:
+ case SRB_STATUS_SELECTION_TIMEOUT:
+ case SRB_STATUS_BUS_RESET:
+ case SRB_STATUS_DATA_OVERRUN:
if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
/* Check for capacity change */
if ((asc == 0x2a) && (ascq == 0x9)) {
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index d06e933191a2..afa9d02a33ec 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -12,7 +12,8 @@
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <asm/irq.h>
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index bd5633667d01..9d1bdcdc1331 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -325,7 +325,7 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
/* Handle "Parameters changed", "Mode parameters changed", and
"Capacity data has changed". */
if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
- scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_rescan_device(sdev);
scsi_device_put(sdev);
}
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index caae61aa2afe..9ec55ddc1204 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -743,7 +743,7 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
if (info->host_active == STATE_ERROR)
return -EIO;
- if (info && current == info->curr) {
+ if (current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
if (err) {
@@ -761,7 +761,7 @@ static void scsifront_sdev_destroy(struct scsi_device *sdev)
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;
- if (info && current == info->curr) {
+ if (current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateClosed);
if (err)
@@ -903,7 +903,7 @@ static int scsifront_probe(struct xenbus_device *dev,
xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
return err;
}
- info = (struct vscsifrnt_info *)host->hostdata;
+ info = shost_priv(host);
dev_set_drvdata(&dev->dev, info);
info->dev = dev;
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 1dcd243df567..ec87d9d878f3 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -100,6 +100,7 @@ static void __init imx8mm_soc_uid(void)
{
void __iomem *ocotp_base;
struct device_node *np;
+ struct clk *clk;
u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
IMX8MP_OCOTP_UID_OFFSET : 0;
@@ -109,11 +110,20 @@ static void __init imx8mm_soc_uid(void)
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
+ clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(clk)) {
+ WARN_ON(IS_ERR(clk));
+ return;
+ }
+
+ clk_prepare_enable(clk);
soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
soc_uid <<= 32;
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
+ clk_disable_unprepare(clk);
+ clk_put(clk);
iounmap(ocotp_base);
of_node_put(np);
}
diff --git a/drivers/soc/loongson/Kconfig b/drivers/soc/loongson/Kconfig
index 314e13bb3e01..368344943a93 100644
--- a/drivers/soc/loongson/Kconfig
+++ b/drivers/soc/loongson/Kconfig
@@ -20,6 +20,7 @@ config LOONGSON2_GUTS
config LOONGSON2_PM
bool "Loongson-2 SoC Power Management Controller Driver"
depends on LOONGARCH && OF
+ depends on INPUT=y
help
The Loongson-2's power management controller was ACPI, supports ACPI
S2Idle (Suspend To Idle), ACPI S3 (Suspend To RAM), ACPI S4 (Suspend To
diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c
index bace4bc8e03b..9a469779eea7 100644
--- a/drivers/soc/loongson/loongson2_guts.c
+++ b/drivers/soc/loongson/loongson2_guts.c
@@ -70,7 +70,7 @@ static const struct loongson2_soc_die_attr *loongson2_soc_die_match(
if (matches->svr == (svr & matches->mask))
return matches;
matches++;
- };
+ }
return NULL;
}
@@ -94,7 +94,6 @@ static int loongson2_guts_probe(struct platform_device *pdev)
{
struct device_node *root, *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct resource *res;
const struct loongson2_soc_die_attr *soc_die;
const char *machine;
u32 svr;
@@ -106,8 +105,7 @@ static int loongson2_guts_probe(struct platform_device *pdev)
guts->little_endian = of_property_read_bool(np, "little-endian");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- guts->regs = ioremap(res->start, res->end - res->start + 1);
+ guts->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(guts->regs))
return PTR_ERR(guts->regs);
diff --git a/drivers/soc/loongson/loongson2_pm.c b/drivers/soc/loongson/loongson2_pm.c
index 796add6e8b63..b8e5e1e3528a 100644
--- a/drivers/soc/loongson/loongson2_pm.c
+++ b/drivers/soc/loongson/loongson2_pm.c
@@ -11,6 +11,7 @@
#include <linux/input.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
+#include <linux/of_platform.h>
#include <linux/pm_wakeirq.h>
#include <linux/platform_device.h>
#include <asm/bootinfo.h>
@@ -192,12 +193,16 @@ static int loongson2_pm_probe(struct platform_device *pdev)
if (loongson_sysconf.suspend_addr)
suspend_set_ops(&loongson2_suspend_ops);
+ /* Populate children */
+ retval = devm_of_platform_populate(dev);
+ if (retval)
+ dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
+
return 0;
}
static const struct of_device_id loongson2_pm_match[] = {
{ .compatible = "loongson,ls2k0500-pmc", },
- { .compatible = "loongson,ls2k1000-pmc", },
{},
};
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index de31589ed054..5a75ab64d1ed 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -334,6 +334,11 @@ if RISCV
config ARCH_R9A07G043
bool "RISC-V Platform support for RZ/Five"
select ARCH_RZG2L
+ select AX45MP_L2_CACHE if RISCV_DMA_NONCOHERENT
+ select DMA_GLOBAL_POOL
+ select ERRATA_ANDES if RISCV_SBI
+ select ERRATA_ANDES_CMO if ERRATA_ANDES
+
help
This enables support for the Renesas RZ/Five SoC.
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 1cc2281cb370..1720031f35a3 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -13,7 +13,6 @@
#include "sysfs_local.h"
static DEFINE_IDA(sdw_bus_ida);
-static DEFINE_IDA(sdw_peripheral_ida);
static int sdw_get_id(struct sdw_bus *bus)
{
@@ -194,8 +193,8 @@ static int sdw_delete_slave(struct device *dev, void *data)
if (slave->dev_num) { /* clear dev_num if assigned */
clear_bit(slave->dev_num, bus->assigned);
- if (bus->dev_num_ida_min)
- ida_free(&sdw_peripheral_ida, slave->dev_num);
+ if (bus->ops && bus->ops->put_device_num)
+ bus->ops->put_device_num(bus, slave);
}
list_del_init(&slave->node);
mutex_unlock(&bus->bus_lock);
@@ -739,16 +738,15 @@ EXPORT_SYMBOL(sdw_compare_devid);
/* called with bus_lock held */
static int sdw_get_device_num(struct sdw_slave *slave)
{
+ struct sdw_bus *bus = slave->bus;
int bit;
- if (slave->bus->dev_num_ida_min) {
- bit = ida_alloc_range(&sdw_peripheral_ida,
- slave->bus->dev_num_ida_min, SDW_MAX_DEVICES,
- GFP_KERNEL);
+ if (bus->ops && bus->ops->get_device_num) {
+ bit = bus->ops->get_device_num(bus, slave);
if (bit < 0)
goto err;
} else {
- bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
+ bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
if (bit == SDW_MAX_DEVICES) {
bit = -ENODEV;
goto err;
@@ -759,7 +757,7 @@ static int sdw_get_device_num(struct sdw_slave *slave)
* Do not update dev_num in Slave data structure here,
* Update once program dev_num is successful
*/
- set_bit(bit, slave->bus->assigned);
+ set_bit(bit, bus->assigned);
err:
return bit;
@@ -810,7 +808,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
slave->dev_num = slave->dev_num_sticky;
if (bus->ops && bus->ops->new_peripheral_assigned)
- bus->ops->new_peripheral_assigned(bus, dev_num);
+ bus->ops->new_peripheral_assigned(bus, slave, dev_num);
return 0;
}
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
index 1be0bea5f40f..a9d25ae0b73f 100644
--- a/drivers/soundwire/intel_ace2x.c
+++ b/drivers/soundwire/intel_ace2x.c
@@ -10,6 +10,7 @@
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
+#include <sound/pcm_params.h>
#include <sound/hda-mlink.h>
#include "cadence_master.h"
#include "bus.h"
@@ -191,10 +192,292 @@ static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
return hdac_bus_eml_sdw_check_cmdsync_unlocked(sdw->link_res->hbus);
}
+/* DAI callbacks */
+static int intel_params_stream(struct sdw_intel *sdw,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *hw_params,
+ int link_id, int alh_stream_id)
+{
+ struct sdw_intel_link_res *res = sdw->link_res;
+ struct sdw_intel_stream_params_data params_data;
+
+ params_data.substream = substream;
+ params_data.dai = dai;
+ params_data.hw_params = hw_params;
+ params_data.link_id = link_id;
+ params_data.alh_stream_id = alh_stream_id;
+
+ if (res->ops && res->ops->params_stream && res->dev)
+ return res->ops->params_stream(res->dev,
+ &params_data);
+ return -EIO;
+}
+
+static int intel_free_stream(struct sdw_intel *sdw,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ int link_id)
+
+{
+ struct sdw_intel_link_res *res = sdw->link_res;
+ struct sdw_intel_stream_free_data free_data;
+
+ free_data.substream = substream;
+ free_data.dai = dai;
+ free_data.link_id = link_id;
+
+ if (res->ops && res->ops->free_stream && res->dev)
+ return res->ops->free_stream(res->dev,
+ &free_data);
+
+ return 0;
+}
+
/*
* DAI operations
*/
+static int intel_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dai_runtime *dai_runtime;
+ struct sdw_cdns_pdi *pdi;
+ struct sdw_stream_config sconfig;
+ struct sdw_port_config *pconfig;
+ int ch, dir;
+ int ret;
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime)
+ return -EIO;
+
+ ch = params_channels(params);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ dir = SDW_DATA_DIR_RX;
+ else
+ dir = SDW_DATA_DIR_TX;
+
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
+
+ if (!pdi) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* the SHIM will be configured in the callback functions */
+
+ sdw_cdns_config_stream(cdns, ch, dir, pdi);
+
+ /* store pdi and state, may be needed in prepare step */
+ dai_runtime->paused = false;
+ dai_runtime->suspended = false;
+ dai_runtime->pdi = pdi;
+
+ /* Inform DSP about PDI stream number */
+ ret = intel_params_stream(sdw, substream, dai, params,
+ sdw->instance,
+ pdi->intel_alh_id);
+ if (ret)
+ goto error;
+
+ sconfig.direction = dir;
+ sconfig.ch_count = ch;
+ sconfig.frame_rate = params_rate(params);
+ sconfig.type = dai_runtime->stream_type;
+
+ sconfig.bps = snd_pcm_format_width(params_format(params));
+
+ /* Port configuration */
+ pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
+ if (!pconfig) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ pconfig->num = pdi->num;
+ pconfig->ch_mask = (1 << ch) - 1;
+
+ ret = sdw_stream_add_master(&cdns->bus, &sconfig,
+ pconfig, 1, dai_runtime->stream);
+ if (ret)
+ dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
+
+ kfree(pconfig);
+error:
+ return ret;
+}
+
+static int intel_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dai_runtime *dai_runtime;
+ int ch, dir;
+ int ret = 0;
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime) {
+ dev_err(dai->dev, "failed to get dai runtime in %s\n",
+ __func__);
+ return -EIO;
+ }
+
+ if (dai_runtime->suspended) {
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_hw_params *hw_params;
+
+ hw_params = &rtd->dpcm[substream->stream].hw_params;
+
+ dai_runtime->suspended = false;
+
+ /*
+ * .prepare() is called after system resume, where we need to
+ * reinitialize the SHIM/ALH/Cadence IP. .prepare() is also called
+ * to deal with underflows, but in those cases we cannot touch
+ * ALH/SHIM registers.
+ */
+
+ /* configure stream */
+ ch = params_channels(hw_params);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ dir = SDW_DATA_DIR_RX;
+ else
+ dir = SDW_DATA_DIR_TX;
+
+ /* the SHIM will be configured in the callback functions */
+
+ sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
+
+ /* Inform DSP about PDI stream number */
+ ret = intel_params_stream(sdw, substream, dai,
+ hw_params,
+ sdw->instance,
+ dai_runtime->pdi->intel_alh_id);
+ }
+
+ return ret;
+}
+
+static int
+intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dai_runtime *dai_runtime;
+ int ret;
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime)
+ return -EIO;
+
+ /*
+ * The sdw stream state will transition to RELEASED when stream->
+ * master_list is empty. So the stream state will transition to
+ * DEPREPARED for the first cpu-dai and to RELEASED for the last
+ * cpu-dai.
+ */
+ ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
+ if (ret < 0) {
+ dev_err(dai->dev, "remove master from stream %s failed: %d\n",
+ dai_runtime->stream->name, ret);
+ return ret;
+ }
+
+ ret = intel_free_stream(sdw, substream, dai, sdw->instance);
+ if (ret < 0) {
+ dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
+ return ret;
+ }
+
+ dai_runtime->pdi = NULL;
+
+ return 0;
+}
+
+static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
+{
+ return cdns_set_sdw_stream(dai, stream, direction);
+}
+
+static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
+ int direction)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_cdns_dai_runtime *dai_runtime;
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime)
+ return ERR_PTR(-EINVAL);
+
+ return dai_runtime->stream;
+}
+
+static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_intel_link_res *res = sdw->link_res;
+ struct sdw_cdns_dai_runtime *dai_runtime;
+ int ret = 0;
+
+ /*
+ * The .trigger callback is used to program HDaudio DMA and send required IPC to audio
+ * firmware.
+ */
+ if (res->ops && res->ops->trigger) {
+ ret = res->ops->trigger(substream, cmd, dai);
+ if (ret < 0)
+ return ret;
+ }
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime) {
+ dev_err(dai->dev, "failed to get dai runtime in %s\n",
+ __func__);
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+
+ /*
+ * The .prepare callback is used to deal with xruns and resume operations.
+ * In the case of xruns, the DMAs and SHIM registers cannot be touched,
+ * but for resume operations they need to be reinitialized. The
+ * .trigger callback is therefore used only to track the suspend case.
+ */
+
+ dai_runtime->suspended = true;
+
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dai_runtime->paused = true;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dai_runtime->paused = false;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
+ .hw_params = intel_hw_params,
+ .prepare = intel_prepare,
+ .hw_free = intel_hw_free,
+ .trigger = intel_trigger,
+ .set_stream = intel_pcm_set_sdw_stream,
+ .get_stream = intel_get_sdw_stream,
};
static const struct snd_soc_component_driver dai_component = {
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 0daa6ca9a224..7f15e3549e53 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -23,9 +23,6 @@
#include "intel.h"
#include "intel_auxdevice.h"
-/* IDA min selected to avoid conflicts with HDaudio/iDISP SDI values */
-#define INTEL_DEV_NUM_IDA_MIN 4
-
#define INTEL_MASTER_SUSPEND_DELAY_MS 3000
/*
@@ -44,6 +41,39 @@ static int md_flags;
module_param_named(sdw_md_flags, md_flags, int, 0444);
MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
+struct wake_capable_part {
+ const u16 mfg_id;
+ const u16 part_id;
+};
+
+static struct wake_capable_part wake_capable_list[] = {
+ {0x025d, 0x5682},
+ {0x025d, 0x700},
+ {0x025d, 0x711},
+ {0x025d, 0x1712},
+ {0x025d, 0x1713},
+ {0x025d, 0x1716},
+ {0x025d, 0x1717},
+ {0x025d, 0x712},
+ {0x025d, 0x713},
+ {0x025d, 0x714},
+ {0x025d, 0x715},
+ {0x025d, 0x716},
+ {0x025d, 0x717},
+ {0x025d, 0x722},
+};
+
+static bool is_wake_capable(struct sdw_slave *slave)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wake_capable_list); i++)
+ if (slave->id.part_id == wake_capable_list[i].part_id &&
+ slave->id.mfg_id == wake_capable_list[i].mfg_id)
+ return true;
+ return false;
+}
+
static int generic_pre_bank_switch(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
@@ -60,18 +90,32 @@ static int generic_post_bank_switch(struct sdw_bus *bus)
return sdw->link_res->hw_ops->post_bank_switch(sdw);
}
-static void generic_new_peripheral_assigned(struct sdw_bus *bus, int dev_num)
+static void generic_new_peripheral_assigned(struct sdw_bus *bus,
+ struct sdw_slave *slave,
+ int dev_num)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
struct sdw_intel *sdw = cdns_to_intel(cdns);
+ int dev_num_min;
+ int dev_num_max;
+ bool wake_capable = slave->prop.wake_capable || is_wake_capable(slave);
+
+ if (wake_capable) {
+ dev_num_min = SDW_INTEL_DEV_NUM_IDA_MIN;
+ dev_num_max = SDW_MAX_DEVICES;
+ } else {
+ dev_num_min = 1;
+ dev_num_max = SDW_INTEL_DEV_NUM_IDA_MIN - 1;
+ }
/* paranoia check, this should never happen */
- if (dev_num < INTEL_DEV_NUM_IDA_MIN || dev_num > SDW_MAX_DEVICES) {
- dev_err(bus->dev, "%s: invalid dev_num %d\n", __func__, dev_num);
+ if (dev_num < dev_num_min || dev_num > dev_num_max) {
+ dev_err(bus->dev, "%s: invalid dev_num %d, wake supported %d\n",
+ __func__, dev_num, slave->prop.wake_capable);
return;
}
- if (sdw->link_res->hw_ops->program_sdi)
+ if (sdw->link_res->hw_ops->program_sdi && wake_capable)
sdw->link_res->hw_ops->program_sdi(sdw, dev_num);
}
@@ -123,6 +167,30 @@ static int intel_prop_read(struct sdw_bus *bus)
return 0;
}
+static DEFINE_IDA(intel_peripheral_ida);
+
+static int intel_get_device_num_ida(struct sdw_bus *bus, struct sdw_slave *slave)
+{
+ int bit;
+
+ if (slave->prop.wake_capable || is_wake_capable(slave))
+ return ida_alloc_range(&intel_peripheral_ida,
+ SDW_INTEL_DEV_NUM_IDA_MIN, SDW_MAX_DEVICES,
+ GFP_KERNEL);
+
+ bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
+ if (bit == SDW_MAX_DEVICES)
+ return -ENODEV;
+
+ return bit;
+}
+
+static void intel_put_device_num_ida(struct sdw_bus *bus, struct sdw_slave *slave)
+{
+ if (slave->prop.wake_capable || is_wake_capable(slave))
+ ida_free(&intel_peripheral_ida, slave->dev_num);
+}
+
static struct sdw_master_ops sdw_intel_ops = {
.read_prop = intel_prop_read,
.override_adr = sdw_dmi_override_adr,
@@ -132,6 +200,8 @@ static struct sdw_master_ops sdw_intel_ops = {
.pre_bank_switch = generic_pre_bank_switch,
.post_bank_switch = generic_post_bank_switch,
.read_ping_status = cdns_read_ping_status,
+ .get_device_num = intel_get_device_num_ida,
+ .put_device_num = intel_put_device_num_ida,
.new_peripheral_assigned = generic_new_peripheral_assigned,
};
@@ -165,7 +235,6 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
cdns->msg_count = 0;
bus->link_id = auxdev->id;
- bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN;
bus->clk_stop_timeout = 1;
sdw_cdns_probe(cdns);
@@ -248,13 +317,6 @@ int intel_link_startup(struct auxiliary_device *auxdev)
sdw_intel_debugfs_init(sdw);
- /* start bus */
- ret = sdw_intel_start_bus(sdw);
- if (ret) {
- dev_err(dev, "bus start failed: %d\n", ret);
- goto err_power_up;
- }
-
/* Enable runtime PM */
if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
pm_runtime_set_autosuspend_delay(dev,
@@ -264,6 +326,15 @@ int intel_link_startup(struct auxiliary_device *auxdev)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ pm_runtime_resume(bus->dev);
+ }
+
+ /* start bus */
+ ret = sdw_intel_start_bus(sdw);
+ if (ret) {
+ dev_err(dev, "bus start failed: %d\n", ret);
+ goto err_pm_runtime;
}
clock_stop_quirks = sdw->link_res->clock_stop_quirks;
@@ -293,12 +364,18 @@ int intel_link_startup(struct auxiliary_device *auxdev)
* with a delay. A more complete solution would require the
* definition of Master properties.
*/
- if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
+ if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) {
+ pm_runtime_mark_last_busy(bus->dev);
+ pm_runtime_mark_last_busy(dev);
pm_runtime_idle(dev);
+ }
sdw->startup_done = true;
return 0;
+err_pm_runtime:
+ if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME))
+ pm_runtime_disable(dev);
err_power_up:
sdw_intel_link_power_down(sdw);
err_init:
@@ -552,6 +629,8 @@ static int __maybe_unused intel_resume(struct device *dev)
pm_runtime_mark_last_busy(dev);
pm_runtime_enable(dev);
+ pm_runtime_resume(bus->dev);
+
link_flags = md_flags >> (bus->link_id * 8);
if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
@@ -587,6 +666,7 @@ static int __maybe_unused intel_resume(struct device *dev)
* counters and delay the pm_runtime suspend by several
* seconds, by when all enumeration should be complete.
*/
+ pm_runtime_mark_last_busy(bus->dev);
pm_runtime_mark_last_busy(dev);
return 0;
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index c029e4d53573..55be9f4b8d59 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -10,7 +10,6 @@
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index 453a9b37ce78..d239fc5a49cc 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -256,7 +256,6 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(priv->dev, priv->ctlr);
if (ret) {
- pm_runtime_disable(priv->dev);
dev_err(priv->dev, "Failed to register SPI controller: %d\n", ret);
}
diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
index fd2fac236bbd..3aff5a166c94 100644
--- a/drivers/spi/spi-gxp.c
+++ b/drivers/spi/spi-gxp.c
@@ -194,7 +194,7 @@ static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op
return ret;
}
- return write_len;
+ return 0;
}
static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index a8a74c7cb79f..498e35c8db2c 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -662,7 +662,7 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
if (spi_imx->count >= 512)
ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET;
else
- ctrl |= (spi_imx->count*8 - 1)
+ ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
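A tiny sketch of the burst-length arithmetic changed above: the field holds the total number of bits in the burst minus one, and the fix derives that from the configured word size instead of assuming 8-bit words. The helper name and the sample counts below are illustrative only.

#include <stdio.h>

/* Burst length field: total bits in the burst minus one. */
static unsigned int ecspi_burst_len(unsigned int words,
				    unsigned int bits_per_word)
{
	return words * bits_per_word - 1;
}

int main(void)
{
	/* 4 words of 16 bits: with the old 8-bit assumption the field
	 * would describe only half of the data actually queued. */
	printf("assuming 8 bpw: %u\n", ecspi_burst_len(4, 8));
	printf("actual 16 bpw:  %u\n", ecspi_burst_len(4, 16));
	return 0;
}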
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index a7381e774b95..57d767a68e7b 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -72,6 +72,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 45a4acc95661..c964f41dcc42 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -1084,6 +1084,13 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
base + FSPI_AHBCR);
+ /* Reset the FLSHxCR1 registers. */
+ reg = FSPI_FLSHXCR1_TCSH(0x3) | FSPI_FLSHXCR1_TCSS(0x3);
+ fspi_writel(f, reg, base + FSPI_FLSHA1CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHA2CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHB1CR1);
+ fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
+
/* AHB Read - Set lut sequence ID for all CS. */
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index b6d66caba4c0..ef665f470c5b 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -277,6 +277,7 @@ struct stm32_spi_cfg {
* @fifo_size: size of the embedded fifo in bytes
* @cur_midi: master inter-data idleness in ns
* @cur_speed: speed configured in Hz
+ * @cur_half_period: time of a half bit in us
* @cur_bpw: number of bits in a single SPI data frame
* @cur_fthlv: fifo threshold level (data frames in a single data packet)
* @cur_comm: SPI communication mode
@@ -304,6 +305,7 @@ struct stm32_spi {
unsigned int cur_midi;
unsigned int cur_speed;
+ unsigned int cur_half_period;
unsigned int cur_bpw;
unsigned int cur_fthlv;
unsigned int cur_comm;
@@ -468,6 +470,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
+ spi->cur_half_period = DIV_ROUND_CLOSEST(USEC_PER_SEC, 2 * spi->cur_speed);
+
return mbrdiv - 1;
}
@@ -709,6 +713,10 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
return;
}
+ /* Add a delay to make sure that transmission is ended. */
+ if (spi->cur_half_period)
+ udelay(spi->cur_half_period);
+
if (spi->cur_usedma && spi->dma_tx)
dmaengine_terminate_async(spi->dma_tx);
if (spi->cur_usedma && spi->dma_rx)
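A minimal sketch of the half-bit delay introduced above, reusing the rounding that the kernel's DIV_ROUND_CLOSEST() performs for unsigned operands. At fast clock rates the rounded value becomes 0, which is why the udelay() in stm32h7_spi_disable() is guarded by the cur_half_period check.

#include <stdio.h>

#define USEC_PER_SEC 1000000U

/* Round-to-nearest division, as DIV_ROUND_CLOSEST() does for unsigned
 * operands. */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	/* Half-bit time in us for a few bus speeds, matching the
	 * spi->cur_half_period computation above. */
	unsigned int speeds[] = { 100000, 500000, 1000000, 10000000 };
	unsigned int i;

	for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
		printf("%8u Hz -> %u us\n", speeds[i],
		       div_round_closest(USEC_PER_SEC, 2 * speeds[i]));
	return 0;
}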
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 3f5b1556ece0..fddc63309773 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -106,6 +106,7 @@ struct sun6i_spi {
struct reset_control *rstc;
struct completion done;
+ struct completion dma_rx_done;
const u8 *tx_buf;
u8 *rx_buf;
@@ -200,6 +201,13 @@ static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
return SUN6I_MAX_XFER_SIZE - 1;
}
+static void sun6i_spi_dma_rx_cb(void *param)
+{
+ struct sun6i_spi *sspi = param;
+
+ complete(&sspi->dma_rx_done);
+}
+
static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
struct spi_transfer *tfr)
{
@@ -211,7 +219,7 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
struct dma_slave_config rxconf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = sspi->dma_addr_rx,
- .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.src_maxburst = 8,
};
@@ -224,6 +232,8 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
DMA_PREP_INTERRUPT);
if (!rxdesc)
return -EINVAL;
+ rxdesc->callback_param = sspi;
+ rxdesc->callback = sun6i_spi_dma_rx_cb;
}
txdesc = NULL;
@@ -279,6 +289,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
return -EINVAL;
reinit_completion(&sspi->done);
+ reinit_completion(&sspi->dma_rx_done);
sspi->tx_buf = tfr->tx_buf;
sspi->rx_buf = tfr->rx_buf;
sspi->len = tfr->len;
@@ -479,6 +490,22 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
msecs_to_jiffies(tx_time));
+
+ if (!use_dma) {
+ sun6i_spi_drain_fifo(sspi);
+ } else {
+ if (timeout && rx_len) {
+ /*
+ * Even though RX on the peripheral side has finished,
+ * the RX DMA might still be in flight

+ */
+ timeout = wait_for_completion_timeout(&sspi->dma_rx_done,
+ timeout);
+ if (!timeout)
+ dev_warn(&master->dev, "RX DMA timeout\n");
+ }
+ }
+
end = jiffies;
if (!timeout) {
dev_warn(&master->dev,
@@ -506,7 +533,6 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
/* Transfer complete */
if (status & SUN6I_INT_CTL_TC) {
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
- sun6i_spi_drain_fifo(sspi);
complete(&sspi->done);
return IRQ_HANDLED;
}
@@ -665,6 +691,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
}
init_completion(&sspi->done);
+ init_completion(&sspi->dma_rx_done);
sspi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(sspi->rstc)) {
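A sketch of the remaining-timeout handoff used above: wait_for_completion_timeout() returns how much of the jiffies budget is left, and that leftover bounds the second wait for the RX DMA callback. The helper and the time values are purely illustrative.

#include <stdio.h>

/* Stand-in for wait_for_completion_timeout(): returns the part of the
 * budget left if the event fired in time, 0 on timeout. The 'took'
 * values below are arbitrary and only illustrate the handoff. */
static unsigned long fake_wait(unsigned long budget, unsigned long took)
{
	return took < budget ? budget - took : 0;
}

int main(void)
{
	unsigned long timeout = 1000;	/* initial budget, in "jiffies" */

	/* The transfer-complete interrupt arrives first... */
	timeout = fake_wait(timeout, 400);
	printf("after TC wait: %lu left\n", timeout);

	/* ...and whatever is left bounds the wait for the RX DMA
	 * callback, exactly as the second wait in the hunk above. */
	if (timeout) {
		timeout = fake_wait(timeout, 700);
		if (!timeout)
			printf("RX DMA timeout\n");
	}
	return 0;
}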
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 94d9a33d9af5..9a46b2478f4e 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -1340,9 +1340,9 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
return 0;
clk_dis_all:
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
@@ -1366,11 +1366,15 @@ static void zynqmp_qspi_remove(struct platform_device *pdev)
{
struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk);
- pm_runtime_set_suspended(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
}
MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index f569d371a007..57cc1960d059 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -266,7 +266,7 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
{
struct gb_connection *connection;
struct gb_pwm_chip *pwmc;
- struct pwm_chip *pwm;
+ struct pwm_chip *chip;
int ret;
pwmc = kzalloc(sizeof(*pwmc), GFP_KERNEL);
@@ -294,13 +294,13 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
if (ret)
goto exit_connection_disable;
- pwm = &pwmc->chip;
+ chip = &pwmc->chip;
- pwm->dev = &gbphy_dev->dev;
- pwm->ops = &gb_pwm_ops;
- pwm->npwm = pwmc->pwm_max + 1;
+ chip->dev = &gbphy_dev->dev;
+ chip->ops = &gb_pwm_ops;
+ chip->npwm = pwmc->pwm_max + 1;
- ret = pwmchip_add(pwm);
+ ret = pwmchip_add(chip);
if (ret) {
dev_err(&gbphy_dev->dev,
"failed to register PWM: %d\n", ret);
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
index 5d8917160d41..75c985da75b5 100644
--- a/drivers/staging/media/atomisp/Kconfig
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -12,12 +12,12 @@ menuconfig INTEL_ATOMISP
config VIDEO_ATOMISP
tristate "Intel Atom Image Signal Processor Driver"
depends on VIDEO_DEV && INTEL_ATOMISP
+ depends on IPU_BRIDGE
depends on MEDIA_PCI_SUPPORT
depends on PMIC_OPREGION
depends on I2C
select V4L2_FWNODE
select IOSF_MBI
- select IPU_BRIDGE
select VIDEOBUF2_VMALLOC
select VIDEO_V4L2_SUBDEV_API
help
diff --git a/drivers/staging/media/av7110/sp8870.c b/drivers/staging/media/av7110/sp8870.c
index 9767159aeb9b..abf5c72607b6 100644
--- a/drivers/staging/media/av7110/sp8870.c
+++ b/drivers/staging/media/av7110/sp8870.c
@@ -606,4 +606,4 @@ MODULE_DESCRIPTION("Spase SP8870 DVB-T Demodulator driver");
MODULE_AUTHOR("Juergen Peitz");
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(sp8870_attach);
+EXPORT_SYMBOL_GPL(sp8870_attach);
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index e98b3010520e..94171e62dee9 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -1455,17 +1455,18 @@ static int __maybe_unused vi_runtime_suspend(struct device *dev)
}
/*
- * Graph Management
+ * Find the entity matching a given fwnode in a v4l2_async_notifier list
*/
static struct tegra_vi_graph_entity *
-tegra_vi_graph_find_entity(struct tegra_vi_channel *chan,
+tegra_vi_graph_find_entity(struct list_head *list,
const struct fwnode_handle *fwnode)
{
struct tegra_vi_graph_entity *entity;
struct v4l2_async_connection *asd;
- list_for_each_entry(asd, &chan->notifier.done_list, asc_entry) {
+ list_for_each_entry(asd, list, asc_entry) {
entity = to_tegra_vi_graph_entity(asd);
+
if (entity->asd.match.fwnode == fwnode)
return entity;
}
@@ -1532,7 +1533,8 @@ static int tegra_vi_graph_build(struct tegra_vi_channel *chan,
}
/* find the remote entity from notifier list */
- ent = tegra_vi_graph_find_entity(chan, link.remote_node);
+ ent = tegra_vi_graph_find_entity(&chan->notifier.done_list,
+ link.remote_node);
if (!ent) {
dev_err(vi->dev, "no entity found for %pOF\n",
to_of_node(link.remote_node));
@@ -1664,7 +1666,8 @@ static int tegra_vi_graph_notify_bound(struct v4l2_async_notifier *notifier,
* Locate the entity corresponding to the bound subdev and store the
* subdev pointer.
*/
- entity = tegra_vi_graph_find_entity(chan, subdev->fwnode);
+ entity = tegra_vi_graph_find_entity(&chan->notifier.waiting_list,
+ subdev->fwnode);
if (!entity) {
dev_err(vi->dev, "no entity for subdev %s\n", subdev->name);
return -EINVAL;
@@ -1713,7 +1716,8 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
/* skip entities that are already processed */
if (device_match_fwnode(vi->dev, remote) ||
- tegra_vi_graph_find_entity(chan, remote)) {
+ tegra_vi_graph_find_entity(&chan->notifier.waiting_list,
+ remote)) {
fwnode_handle_put(remote);
continue;
}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 5d0f51822414..1cff6052e820 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -45,9 +45,9 @@ static ssize_t lio_target_np_driver_show(struct config_item *item, char *page,
tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
if (tpg_np_new)
- rb = sprintf(page, "1\n");
+ rb = sysfs_emit(page, "1\n");
else
- rb = sprintf(page, "0\n");
+ rb = sysfs_emit(page, "0\n");
return rb;
}
@@ -282,7 +282,7 @@ static ssize_t iscsi_nacl_attrib_##name##_show(struct config_item *item,\
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
- return sprintf(page, "%u\n", nacl->node_attrib.name); \
+ return sysfs_emit(page, "%u\n", nacl->node_attrib.name); \
} \
\
static ssize_t iscsi_nacl_attrib_##name##_store(struct config_item *item,\
@@ -320,7 +320,7 @@ static ssize_t iscsi_nacl_attrib_authentication_show(struct config_item *item,
struct se_node_acl *se_nacl = attrib_to_nacl(item);
struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
- return sprintf(page, "%d\n", nacl->node_attrib.authentication);
+ return sysfs_emit(page, "%d\n", nacl->node_attrib.authentication);
}
static ssize_t iscsi_nacl_attrib_authentication_store(struct config_item *item,
@@ -533,102 +533,102 @@ static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (!se_sess) {
- rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
+ rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator"
" Endpoint: %s\n", se_nacl->initiatorname);
} else {
sess = se_sess->fabric_sess_ptr;
- rb += sprintf(page+rb, "InitiatorName: %s\n",
+ rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n",
sess->sess_ops->InitiatorName);
- rb += sprintf(page+rb, "InitiatorAlias: %s\n",
+ rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n",
sess->sess_ops->InitiatorAlias);
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ",
sess->sid, sess->isid, sess->tsih);
- rb += sprintf(page+rb, "SessionType: %s\n",
+ rb += sysfs_emit_at(page, rb, "SessionType: %s\n",
(sess->sess_ops->SessionType) ?
"Discovery" : "Normal");
- rb += sprintf(page+rb, "Session State: ");
+ rb += sysfs_emit_at(page, rb, "Session State: ");
switch (sess->session_state) {
case TARG_SESS_STATE_FREE:
- rb += sprintf(page+rb, "TARG_SESS_FREE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n");
break;
case TARG_SESS_STATE_ACTIVE:
- rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n");
break;
case TARG_SESS_STATE_LOGGED_IN:
- rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n");
break;
case TARG_SESS_STATE_FAILED:
- rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n");
break;
case TARG_SESS_STATE_IN_CONTINUE:
- rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n");
break;
default:
- rb += sprintf(page+rb, "ERROR: Unknown Session"
+ rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session"
" State!\n");
break;
}
- rb += sprintf(page+rb, "---------------------[iSCSI Session"
+ rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session"
" Values]-----------------------\n");
- rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
+ rb += sysfs_emit_at(page, rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
" : MaxCmdSN : ITT : TTT\n");
max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
- rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
+ rb += sysfs_emit_at(page, rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
" 0x%08x 0x%08x\n",
sess->cmdsn_window,
(max_cmd_sn - sess->exp_cmd_sn) + 1,
sess->exp_cmd_sn, max_cmd_sn,
sess->init_task_tag, sess->targ_xfer_tag);
- rb += sprintf(page+rb, "----------------------[iSCSI"
+ rb += sysfs_emit_at(page, rb, "----------------------[iSCSI"
" Connections]-------------------------\n");
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
- rb += sprintf(page+rb, "CID: %hu Connection"
+ rb += sysfs_emit_at(page, rb, "CID: %hu Connection"
" State: ", conn->cid);
switch (conn->conn_state) {
case TARG_CONN_STATE_FREE:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_FREE\n");
break;
case TARG_CONN_STATE_XPT_UP:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_XPT_UP\n");
break;
case TARG_CONN_STATE_IN_LOGIN:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGIN\n");
break;
case TARG_CONN_STATE_LOGGED_IN:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGGED_IN\n");
break;
case TARG_CONN_STATE_IN_LOGOUT:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGOUT\n");
break;
case TARG_CONN_STATE_LOGOUT_REQUESTED:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
break;
case TARG_CONN_STATE_CLEANUP_WAIT:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_CLEANUP_WAIT\n");
break;
default:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"ERROR: Unknown Connection State!\n");
break;
}
- rb += sprintf(page+rb, " Address %pISc %s", &conn->login_sockaddr,
+ rb += sysfs_emit_at(page, rb, " Address %pISc %s", &conn->login_sockaddr,
(conn->network_transport == ISCSI_TCP) ?
"TCP" : "SCTP");
- rb += sprintf(page+rb, " StatSN: 0x%08x\n",
+ rb += sysfs_emit_at(page, rb, " StatSN: 0x%08x\n",
conn->stat_sn);
}
spin_unlock(&sess->conn_lock);
@@ -641,7 +641,7 @@ static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
static ssize_t lio_target_nacl_cmdsn_depth_show(struct config_item *item,
char *page)
{
- return sprintf(page, "%u\n", acl_to_nacl(item)->queue_depth);
+ return sysfs_emit(page, "%u\n", acl_to_nacl(item)->queue_depth);
}
static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
@@ -750,7 +750,7 @@ static ssize_t iscsi_tpg_attrib_##name##_show(struct config_item *item, \
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
- rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+ rb = sysfs_emit(page, "%u\n", tpg->tpg_attrib.name); \
iscsit_put_tpg(tpg); \
return rb; \
} \
@@ -783,7 +783,6 @@ CONFIGFS_ATTR(iscsi_tpg_attrib_, name)
DEF_TPG_ATTRIB(authentication);
DEF_TPG_ATTRIB(login_timeout);
-DEF_TPG_ATTRIB(netif_timeout);
DEF_TPG_ATTRIB(generate_node_acls);
DEF_TPG_ATTRIB(default_cmdsn_depth);
DEF_TPG_ATTRIB(cache_dynamic_acls);
@@ -799,7 +798,6 @@ DEF_TPG_ATTRIB(login_keys_workaround);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_attr_authentication,
&iscsi_tpg_attrib_attr_login_timeout,
- &iscsi_tpg_attrib_attr_netif_timeout,
&iscsi_tpg_attrib_attr_generate_node_acls,
&iscsi_tpg_attrib_attr_default_cmdsn_depth,
&iscsi_tpg_attrib_attr_cache_dynamic_acls,
@@ -1138,7 +1136,7 @@ static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
char *page)
{
- return sprintf(page, "Datera Inc. iSCSI Target "ISCSIT_VERSION"\n");
+ return sysfs_emit(page, "Datera Inc. iSCSI Target %s\n", ISCSIT_VERSION);
}
CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
@@ -1146,7 +1144,7 @@ CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
static ssize_t lio_target_wwn_cpus_allowed_list_show(
struct config_item *item, char *page)
{
- return sprintf(page, "%*pbl\n",
+ return sysfs_emit(page, "%*pbl\n",
cpumask_pr_args(iscsit_global->allowed_cpumask));
}
@@ -1283,7 +1281,7 @@ static ssize_t iscsi_disc_enforce_discovery_auth_show(struct config_item *item,
{
struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
- return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
+ return sysfs_emit(page, "%d\n", discovery_auth->enforce_discovery_auth);
}
static ssize_t iscsi_disc_enforce_discovery_auth_store(struct config_item *item,
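A rough userspace model of the bounded formatting the sysfs_emit_at() conversions above rely on: format at an offset into a page-sized buffer and return the number of characters added, never writing past PAGE_SIZE. This is only to illustrate why the offset-based calls cannot overrun the page; the in-kernel helper additionally checks that the buffer is page aligned and the offset is sane.

#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Model of sysfs_emit_at(): bounded, offset-based formatting. */
static int emit_at(char *buf, int at, const char *fmt, ...)
{
	va_list args;
	int len;

	if (at < 0 || at >= PAGE_SIZE)
		return 0;

	va_start(args, fmt);
	len = vsnprintf(buf + at, PAGE_SIZE - at, fmt, args);
	va_end(args);

	return len < PAGE_SIZE - at ? len : PAGE_SIZE - at - 1;
}

int main(void)
{
	char page[PAGE_SIZE];
	int rb = 0;

	rb += emit_at(page, rb, "InitiatorName: %s\n", "iqn.2003-01.example:init");
	rb += emit_at(page, rb, "Session State: %s\n", "TARG_SESS_STATE_LOGGED_IN");
	fputs(page, stdout);
	printf("total: %d bytes\n", rb);
	return 0;
}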
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 3cac1aafef68..f7bac98fd4fe 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -211,7 +211,6 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->authentication = TA_AUTHENTICATION;
a->login_timeout = TA_LOGIN_TIMEOUT;
- a->netif_timeout = TA_NETIF_TIMEOUT;
a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
a->generate_node_acls = TA_GENERATE_NODE_ACLS;
a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
@@ -666,31 +665,6 @@ int iscsit_ta_login_timeout(
return 0;
}
-int iscsit_ta_netif_timeout(
- struct iscsi_portal_group *tpg,
- u32 netif_timeout)
-{
- struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
-
- if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
- pr_err("Requested Network Interface Timeout %u larger"
- " than maximum %u\n", netif_timeout,
- TA_NETIF_TIMEOUT_MAX);
- return -EINVAL;
- } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
- pr_err("Requested Network Interface Timeout %u smaller"
- " than minimum %u\n", netif_timeout,
- TA_NETIF_TIMEOUT_MIN);
- return -EINVAL;
- }
-
- a->netif_timeout = netif_timeout;
- pr_debug("Set Network Interface Timeout to %u for"
- " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
-
- return 0;
-}
-
int iscsit_ta_generate_node_acls(
struct iscsi_portal_group *tpg,
u32 flag)
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 839e45362776..71d067f62177 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -38,7 +38,6 @@ extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
struct iscsi_tpg_np *);
extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
-extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 936e5ff1b209..d5860c1c1f46 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1392,16 +1392,16 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
unsigned char buf[INQUIRY_VENDOR_LEN + 2];
char *stripped = NULL;
- size_t len;
+ ssize_t len;
ssize_t ret;
- len = strlcpy(buf, page, sizeof(buf));
- if (len < sizeof(buf)) {
+ len = strscpy(buf, page, sizeof(buf));
+ if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
}
- if (len > INQUIRY_VENDOR_LEN) {
+ if (len < 0 || len > INQUIRY_VENDOR_LEN) {
pr_err("Emulated T10 Vendor Identification exceeds"
" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
"\n");
@@ -1448,16 +1448,16 @@ static ssize_t target_wwn_product_id_store(struct config_item *item,
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
unsigned char buf[INQUIRY_MODEL_LEN + 2];
char *stripped = NULL;
- size_t len;
+ ssize_t len;
ssize_t ret;
- len = strlcpy(buf, page, sizeof(buf));
- if (len < sizeof(buf)) {
+ len = strscpy(buf, page, sizeof(buf));
+ if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
}
- if (len > INQUIRY_MODEL_LEN) {
+ if (len < 0 || len > INQUIRY_MODEL_LEN) {
pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
__stringify(INQUIRY_MODEL_LEN)
"\n");
@@ -1504,16 +1504,16 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
unsigned char buf[INQUIRY_REVISION_LEN + 2];
char *stripped = NULL;
- size_t len;
+ ssize_t len;
ssize_t ret;
- len = strlcpy(buf, page, sizeof(buf));
- if (len < sizeof(buf)) {
+ len = strscpy(buf, page, sizeof(buf));
+ if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
len = strlen(stripped);
}
- if (len > INQUIRY_REVISION_LEN) {
+ if (len < 0 || len > INQUIRY_REVISION_LEN) {
pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
__stringify(INQUIRY_REVISION_LEN)
"\n");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index b7ac60f4a219..b6523d4b9259 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -843,7 +843,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
- struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
@@ -853,11 +852,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
+ struct config_item *item;
int ret;
- config_item_put(iter->prev_item);
- iter->prev_item = NULL;
-
/*
* We add the device early to the idr, so it can be used
* by backend modules during configuration. We do not want
@@ -867,12 +864,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
if (!target_dev_configured(dev))
return 0;
- iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
- if (!iter->prev_item)
+ item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+ if (!item)
return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
+ config_item_put(item);
mutex_lock(&device_mutex);
return ret;
@@ -895,7 +893,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
- config_item_put(iter.prev_item);
return ret;
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index a7050f63b7cc..a6a06a5f7483 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -739,11 +739,16 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+ /*
+ * Set bits to indicate WRITE_ODIRECT so we are not throttled
+ * by WBT.
+ */
+ opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
/*
* Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
- opf = REQ_OP_WRITE;
miter_dir = SG_MITER_TO_SG;
if (bdev_fua(ib_dev->ibd_bd)) {
if (cmd->se_cmd_flags & SCF_FUA)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 687adc9e086c..0686882bcbda 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -264,6 +264,7 @@ void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
percpu_ref_put(&cmd_cnt->refcnt);
percpu_ref_exit(&cmd_cnt->refcnt);
+ kfree(cmd_cnt);
}
EXPORT_SYMBOL_GPL(target_free_cmd_counter);
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 72685ee0d53f..6bb5cae09688 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -238,8 +238,6 @@ int optee_notif_send(struct optee *optee, u_int key);
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param);
-int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
-int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp);
void optee_supp_release(struct optee_supp *supp);
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index 409cadcc1cff..754e11dcb240 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -47,8 +47,6 @@ struct tee_device {
struct tee_shm_pool *pool;
};
-int tee_shm_init(void);
-
int tee_shm_get_fd(struct tee_shm *shm);
bool tee_device_get(struct tee_device *teedev);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 19a4b33cb564..c81a00fbca7d 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -510,4 +510,16 @@ config KHADAS_MCU_FAN_THERMAL
If you say yes here you get support for the FAN controlled
by the Microcontroller found on the Khadas VIM boards.
+config LOONGSON2_THERMAL
+ tristate "Loongson-2 SoC series thermal driver"
+ depends on LOONGARCH || COMPILE_TEST
+ depends on OF
+ help
+ Support for the thermal sensor found on Loongson-2 SoC series
+ platforms. The driver implements the get_temp and set_trips
+ callbacks: get_temp reports the current node temperature, and
+ set_trips programs the temperature window whose high or low
+ threshold, when crossed, raises the thermal interrupt.
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 058664bc3ec0..c934cab309ae 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -63,3 +63,4 @@ obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o
obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o
obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o
obj-$(CONFIG_KHADAS_MCU_FAN_THERMAL) += khadas_mcu_fan.o
+obj-$(CONFIG_LOONGSON2_THERMAL) += loongson2_thermal.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 9f6dc4fc9112..f00765bfc22e 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -876,8 +876,9 @@ static int armada_thermal_probe(struct platform_device *pdev)
/* Wait the sensors to be valid */
armada_wait_sensor_validity(priv);
- tz = thermal_zone_device_register(priv->zone_name, 0, 0, priv,
- &legacy_ops, NULL, 0, 0);
+ tz = thermal_tripless_zone_device_register(priv->zone_name,
+ priv, &legacy_ops,
+ NULL);
if (IS_ERR(tz)) {
dev_err(&pdev->dev,
"Failed to register thermal zone device\n");
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 0b73abdaa792..9674e5ffcfa2 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -334,7 +334,6 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
return PTR_ERR(priv->tmon_base);
priv->dev = &pdev->dev;
- platform_set_drvdata(pdev, priv);
of_ops = priv->temp_params->of_ops;
thermal = devm_thermal_of_zone_register(&pdev->dev, 0, priv,
diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
index 747915890022..9a29dfd4c7fe 100644
--- a/drivers/thermal/broadcom/sr-thermal.c
+++ b/drivers/thermal/broadcom/sr-thermal.c
@@ -91,7 +91,6 @@ static int sr_thermal_probe(struct platform_device *pdev)
dev_dbg(dev, "thermal sensor %d registered\n", i);
}
- platform_set_drvdata(pdev, sr_thermal);
return 0;
}
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index fca5c2c93bf9..576f88b6a1b3 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -229,7 +229,7 @@ MODULE_DEVICE_TABLE(of, db8500_thermal_match);
static struct platform_driver db8500_thermal_driver = {
.driver = {
.name = "db8500-thermal",
- .of_match_table = of_match_ptr(db8500_thermal_match),
+ .of_match_table = db8500_thermal_match,
},
.probe = db8500_thermal_probe,
.suspend = db8500_thermal_suspend,
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 9954040d1d2c..7a18cb960bee 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -139,8 +139,8 @@ static int dove_thermal_probe(struct platform_device *pdev)
return ret;
}
- thermal = thermal_zone_device_register("dove_thermal", 0, 0,
- priv, &ops, NULL, 0, 0);
+ thermal = thermal_tripless_zone_device_register("dove_thermal", priv,
+ &ops, NULL);
if (IS_ERR(thermal)) {
dev_err(&pdev->dev,
"Failed to register thermal zone device\n");
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
index e89b11b3f2b9..14111ccf6e4c 100644
--- a/drivers/thermal/imx8mm_thermal.c
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -178,10 +178,8 @@ static int imx8mm_tmu_probe_set_calib_v1(struct platform_device *pdev,
int ret;
ret = nvmem_cell_read_u32(&pdev->dev, "calib", &ana0);
- if (ret) {
- dev_warn(dev, "Failed to read OCOTP nvmem cell (%d).\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read OCOTP nvmem cell\n");
writel(FIELD_PREP(TASR_BUF_VREF_MASK,
FIELD_GET(ANA0_BUF_VREF_MASK, ana0)) |
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index ddd600820f68..ffc2871a021c 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -609,9 +609,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
evaluate_odvp(priv);
- priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
- priv, &int3400_thermal_ops,
- &int3400_thermal_params, 0, 0);
+ priv->thermal = thermal_tripless_zone_device_register("INT3400 Thermal", priv,
+ &int3400_thermal_ops,
+ &int3400_thermal_params);
if (IS_ERR(priv->thermal)) {
result = PTR_ERR(priv->thermal);
goto free_art_trt;
diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c
index 68f59b3735d3..4a918c1e92f9 100644
--- a/drivers/thermal/k3_bandgap.c
+++ b/drivers/thermal/k3_bandgap.c
@@ -225,7 +225,6 @@ static int k3_bandgap_probe(struct platform_device *pdev)
devm_thermal_add_hwmon_sysfs(dev, data[id].tzd);
}
- platform_set_drvdata(pdev, bgp);
return 0;
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index a5a0fc9b9356..2fc799b07b90 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -502,8 +502,6 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
writel(K3_VTM_ANYMAXT_OUTRG_ALERT_EN, data[0].bgp->cfg2_base +
K3_VTM_MISC_CTRL_OFFSET);
- platform_set_drvdata(pdev, bgp);
-
print_look_up_table(dev, ref_table);
/*
* Now that the derived_table has the appropriate look up values
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 668747bd86ef..acb10d24256d 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -71,8 +71,8 @@ static int kirkwood_thermal_probe(struct platform_device *pdev)
if (IS_ERR(priv->sensor))
return PTR_ERR(priv->sensor);
- thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
- priv, &ops, NULL, 0, 0);
+ thermal = thermal_tripless_zone_device_register("kirkwood_thermal",
+ priv, &ops, NULL);
if (IS_ERR(thermal)) {
dev_err(&pdev->dev,
"Failed to register thermal zone device\n");
diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c
new file mode 100644
index 000000000000..133098dc0854
--- /dev/null
+++ b/drivers/thermal/loongson2_thermal.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: zhanghongchen <zhanghongchen@loongson.cn>
+ * Yinbo Zhu <zhuyinbo@loongson.cn>
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/units.h>
+#include "thermal_hwmon.h"
+
+#define LOONGSON2_MAX_SENSOR_SEL_NUM 3
+
+#define LOONGSON2_THSENS_CTRL_HI_REG 0x0
+#define LOONGSON2_THSENS_CTRL_LOW_REG 0x8
+#define LOONGSON2_THSENS_STATUS_REG 0x10
+#define LOONGSON2_THSENS_OUT_REG 0x14
+
+#define LOONGSON2_THSENS_INT_LO BIT(0)
+#define LOONGSON2_THSENS_INT_HIGH BIT(1)
+#define LOONGSON2_THSENS_OUT_MASK 0xFF
+
+struct loongson2_thermal_chip_data {
+ unsigned int thermal_sensor_sel;
+};
+
+struct loongson2_thermal_data {
+ void __iomem *regs;
+ const struct loongson2_thermal_chip_data *chip_data;
+};
+
+static int loongson2_thermal_set(struct loongson2_thermal_data *data,
+ int low, int high, bool enable)
+{
+ u64 reg_ctrl = 0;
+ int reg_off = data->chip_data->thermal_sensor_sel * 2;
+
+ low = clamp(low, -40, high);
+ high = clamp(high, low, 125);
+
+ low += HECTO;
+ high += HECTO;
+
+ reg_ctrl = low;
+ reg_ctrl |= enable ? 0x100 : 0;
+ writew(reg_ctrl, data->regs + LOONGSON2_THSENS_CTRL_LOW_REG + reg_off);
+
+ reg_ctrl = high;
+ reg_ctrl |= enable ? 0x100 : 0;
+ writew(reg_ctrl, data->regs + LOONGSON2_THSENS_CTRL_HI_REG + reg_off);
+
+ return 0;
+}
+
+static int loongson2_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ u32 reg_val;
+ struct loongson2_thermal_data *data = thermal_zone_device_priv(tz);
+
+ reg_val = readl(data->regs + LOONGSON2_THSENS_OUT_REG);
+ *temp = ((reg_val & LOONGSON2_THSENS_OUT_MASK) - HECTO) * KILO;
+
+ return 0;
+}
+
+static irqreturn_t loongson2_thermal_irq_thread(int irq, void *dev)
+{
+ struct thermal_zone_device *tzd = dev;
+ struct loongson2_thermal_data *data = thermal_zone_device_priv(tzd);
+
+ writeb(LOONGSON2_THSENS_INT_LO | LOONGSON2_THSENS_INT_HIGH, data->regs +
+ LOONGSON2_THSENS_STATUS_REG);
+
+ thermal_zone_device_update(tzd, THERMAL_EVENT_UNSPECIFIED);
+
+ return IRQ_HANDLED;
+}
+
+static int loongson2_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
+{
+ struct loongson2_thermal_data *data = thermal_zone_device_priv(tz);
+
+ return loongson2_thermal_set(data, low/MILLI, high/MILLI, true);
+}
+
+static const struct thermal_zone_device_ops loongson2_of_thermal_ops = {
+ .get_temp = loongson2_thermal_get_temp,
+ .set_trips = loongson2_thermal_set_trips,
+};
+
+static int loongson2_thermal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct loongson2_thermal_data *data;
+ struct thermal_zone_device *tzd;
+ int ret, irq, i;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->chip_data = device_get_match_data(dev);
+
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ writeb(LOONGSON2_THSENS_INT_LO | LOONGSON2_THSENS_INT_HIGH, data->regs +
+ LOONGSON2_THSENS_STATUS_REG);
+
+ loongson2_thermal_set(data, 0, 0, false);
+
+ for (i = 0; i <= LOONGSON2_MAX_SENSOR_SEL_NUM; i++) {
+ tzd = devm_thermal_of_zone_register(dev, i, data,
+ &loongson2_of_thermal_ops);
+
+ if (!IS_ERR(tzd))
+ break;
+
+ if (PTR_ERR(tzd) != -ENODEV)
+ continue;
+
+ return dev_err_probe(dev, PTR_ERR(tzd), "failed to register");
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, loongson2_thermal_irq_thread,
+ IRQF_ONESHOT, "loongson2_thermal", tzd);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to request alarm irq\n");
+
+ devm_thermal_add_hwmon_sysfs(dev, tzd);
+
+ return 0;
+}
+
+static const struct loongson2_thermal_chip_data loongson2_thermal_ls2k1000_data = {
+ .thermal_sensor_sel = 0,
+};
+
+static const struct of_device_id of_loongson2_thermal_match[] = {
+ {
+ .compatible = "loongson,ls2k1000-thermal",
+ .data = &loongson2_thermal_ls2k1000_data,
+ },
+ { /* end */ }
+};
+MODULE_DEVICE_TABLE(of, of_loongson2_thermal_match);
+
+static struct platform_driver loongson2_thermal_driver = {
+ .driver = {
+ .name = "loongson2_thermal",
+ .of_match_table = of_loongson2_thermal_match,
+ },
+ .probe = loongson2_thermal_probe,
+};
+module_platform_driver(loongson2_thermal_driver);
+
+MODULE_DESCRIPTION("Loongson2 thermal driver");
+MODULE_LICENSE("GPL");
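A small sketch of the register encoding used by the new driver above: the sensor output byte is degrees Celsius offset by 100 (HECTO) and is reported to the core in millidegrees, while the trip thresholds are written back with the same offset plus an enable bit. Constants are copied from the defines in the file; the sample values are illustrative.

#include <stdio.h>

#define HECTO 100
#define KILO  1000
#define LOONGSON2_THSENS_OUT_MASK 0xFF

/* Decode used by loongson2_thermal_get_temp(). */
static int out_reg_to_mcelsius(unsigned int reg_val)
{
	return ((int)(reg_val & LOONGSON2_THSENS_OUT_MASK) - HECTO) * KILO;
}

/* Threshold encode from loongson2_thermal_set(): same +100 degC offset,
 * with bit 8 acting as the enable bit in the CTRL registers. */
static unsigned int trip_to_ctrl(int mcelsius, int enable)
{
	return (unsigned int)(mcelsius / KILO + HECTO) | (enable ? 0x100 : 0);
}

int main(void)
{
	printf("raw 0x8d -> %d mdegC\n", out_reg_to_mcelsius(0x8d));
	printf("85000 mdegC trip -> CTRL 0x%x\n", trip_to_ctrl(85000, 1));
	return 0;
}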
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 61c7622d9945..919b6ee208d8 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -139,8 +139,6 @@ static int max77620_thermal_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, mtherm);
-
return 0;
}
diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
index c537aed71017..843214d30bd8 100644
--- a/drivers/thermal/mediatek/auxadc_thermal.c
+++ b/drivers/thermal/mediatek/auxadc_thermal.c
@@ -1282,8 +1282,6 @@ static int mtk_thermal_probe(struct platform_device *pdev)
mtk_thermal_init_bank(mt, i, apmixed_phys_base,
auxadc_phys_base, ctrl_id);
- platform_set_drvdata(pdev, mt);
-
tzdev = devm_thermal_of_zone_register(&pdev->dev, 0, mt,
&mtk_thermal_ops);
if (IS_ERR(tzdev))
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 054c965ae5e1..effd9b00a424 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -58,14 +58,19 @@
#define LVTS_PROTTC(__base) (__base + 0x00CC)
#define LVTS_CLKEN(__base) (__base + 0x00E4)
-#define LVTS_PERIOD_UNIT ((118 * 1000) / (256 * 38))
-#define LVTS_GROUP_INTERVAL 1
-#define LVTS_FILTER_INTERVAL 1
-#define LVTS_SENSOR_INTERVAL 1
-#define LVTS_HW_FILTER 0x2
+#define LVTS_PERIOD_UNIT 0
+#define LVTS_GROUP_INTERVAL 0
+#define LVTS_FILTER_INTERVAL 0
+#define LVTS_SENSOR_INTERVAL 0
+#define LVTS_HW_FILTER 0x0
#define LVTS_TSSEL_CONF 0x13121110
#define LVTS_CALSCALE_CONF 0x300
-#define LVTS_MONINT_CONF 0x9FBF7BDE
+#define LVTS_MONINT_CONF 0x8300318C
+
+#define LVTS_MONINT_OFFSET_SENSOR0 0xC
+#define LVTS_MONINT_OFFSET_SENSOR1 0x180
+#define LVTS_MONINT_OFFSET_SENSOR2 0x3000
+#define LVTS_MONINT_OFFSET_SENSOR3 0x3000000
#define LVTS_INT_SENSOR0 0x0009001F
#define LVTS_INT_SENSOR1 0x001203E0
@@ -81,8 +86,13 @@
#define LVTS_MSR_IMMEDIATE_MODE 0
#define LVTS_MSR_FILTERED_MODE 1
+#define LVTS_MSR_READ_TIMEOUT_US 400
+#define LVTS_MSR_READ_WAIT_US (LVTS_MSR_READ_TIMEOUT_US / 2)
+
#define LVTS_HW_SHUTDOWN_MT8195 105000
+#define LVTS_MINIMUM_THRESHOLD 20000
+
static int golden_temp = LVTS_GOLDEN_TEMP_DEFAULT;
static int coeff_b = LVTS_COEFF_B;
@@ -110,6 +120,8 @@ struct lvts_sensor {
void __iomem *base;
int id;
int dt_id;
+ int low_thresh;
+ int high_thresh;
};
struct lvts_ctrl {
@@ -119,6 +131,8 @@ struct lvts_ctrl {
int num_lvts_sensor;
int mode;
void __iomem *base;
+ int low_thresh;
+ int high_thresh;
};
struct lvts_domain {
@@ -190,7 +204,7 @@ static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td)
int i;
lvts_td->dom_dentry = debugfs_create_dir(dev_name(dev), NULL);
- if (!lvts_td->dom_dentry)
+ if (IS_ERR(lvts_td->dom_dentry))
return 0;
for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
@@ -257,6 +271,7 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
struct lvts_sensor *lvts_sensor = thermal_zone_device_priv(tz);
void __iomem *msr = lvts_sensor->msr;
u32 value;
+ int rc;
/*
* Measurement registers:
@@ -269,7 +284,8 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
* 16 : Valid temperature
* 15-0 : Raw temperature
*/
- value = readl(msr);
+ rc = readl_poll_timeout(msr, value, value & BIT(16),
+ LVTS_MSR_READ_WAIT_US, LVTS_MSR_READ_TIMEOUT_US);
/*
* As the thermal zone temperature will be read before the
@@ -282,7 +298,7 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
* functioning temperature and directly jump to a system
* shutdown.
*/
- if (!(value & BIT(16)))
+ if (rc)
return -EAGAIN;
*temp = lvts_raw_to_temp(value & 0xFFFF);
@@ -290,32 +306,84 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
return 0;
}
+static void lvts_update_irq_mask(struct lvts_ctrl *lvts_ctrl)
+{
+ u32 masks[] = {
+ LVTS_MONINT_OFFSET_SENSOR0,
+ LVTS_MONINT_OFFSET_SENSOR1,
+ LVTS_MONINT_OFFSET_SENSOR2,
+ LVTS_MONINT_OFFSET_SENSOR3,
+ };
+ u32 value = 0;
+ int i;
+
+ value = readl(LVTS_MONINT(lvts_ctrl->base));
+
+ for (i = 0; i < ARRAY_SIZE(masks); i++) {
+ if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
+ && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
+ value |= masks[i];
+ else
+ value &= ~masks[i];
+ }
+
+ writel(value, LVTS_MONINT(lvts_ctrl->base));
+}
+
+static bool lvts_should_update_thresh(struct lvts_ctrl *lvts_ctrl, int high)
+{
+ int i;
+
+ if (high > lvts_ctrl->high_thresh)
+ return true;
+
+ for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++)
+ if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
+ && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
+ return false;
+
+ return true;
+}
+
static int lvts_set_trips(struct thermal_zone_device *tz, int low, int high)
{
struct lvts_sensor *lvts_sensor = thermal_zone_device_priv(tz);
+ struct lvts_ctrl *lvts_ctrl = container_of(lvts_sensor, struct lvts_ctrl, sensors[lvts_sensor->id]);
void __iomem *base = lvts_sensor->base;
- u32 raw_low = lvts_temp_to_raw(low);
+ u32 raw_low = lvts_temp_to_raw(low != -INT_MAX ? low : LVTS_MINIMUM_THRESHOLD);
u32 raw_high = lvts_temp_to_raw(high);
+ bool should_update_thresh;
+
+ lvts_sensor->low_thresh = low;
+ lvts_sensor->high_thresh = high;
+
+ should_update_thresh = lvts_should_update_thresh(lvts_ctrl, high);
+ if (should_update_thresh) {
+ lvts_ctrl->high_thresh = high;
+ lvts_ctrl->low_thresh = low;
+ }
+ lvts_update_irq_mask(lvts_ctrl);
+
+ if (!should_update_thresh)
+ return 0;
/*
- * Hot to normal temperature threshold
+ * Low offset temperature threshold
*
- * LVTS_H2NTHRE
+ * LVTS_OFFSETL
*
* Bits:
*
* 14-0 : Raw temperature for threshold
*/
- if (low != -INT_MAX) {
- pr_debug("%s: Setting low limit temperature interrupt: %d\n",
- thermal_zone_device_type(tz), low);
- writel(raw_low, LVTS_H2NTHRE(base));
- }
+ pr_debug("%s: Setting low limit temperature interrupt: %d\n",
+ thermal_zone_device_type(tz), low);
+ writel(raw_low, LVTS_OFFSETL(base));
/*
- * Hot temperature threshold
+ * High offset temperature threshold
*
- * LVTS_HTHRE
+ * LVTS_OFFSETH
*
* Bits:
*
@@ -323,7 +391,7 @@ static int lvts_set_trips(struct thermal_zone_device *tz, int low, int high)
*/
pr_debug("%s: Setting high limit temperature interrupt: %d\n",
thermal_zone_device_type(tz), high);
- writel(raw_high, LVTS_HTHRE(base));
+ writel(raw_high, LVTS_OFFSETH(base));
return 0;
}
@@ -451,7 +519,7 @@ static irqreturn_t lvts_irq_handler(int irq, void *data)
for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
- aux = lvts_ctrl_irq_handler(lvts_td->lvts_ctrl);
+ aux = lvts_ctrl_irq_handler(&lvts_td->lvts_ctrl[i]);
if (aux != IRQ_HANDLED)
continue;
@@ -521,6 +589,9 @@ static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
*/
lvts_sensor[i].msr = lvts_ctrl_data->mode == LVTS_MSR_IMMEDIATE_MODE ?
imm_regs[i] : msr_regs[i];
+
+ lvts_sensor[i].low_thresh = INT_MIN;
+ lvts_sensor[i].high_thresh = INT_MIN;
};
lvts_ctrl->num_lvts_sensor = lvts_ctrl_data->num_lvts_sensor;
@@ -688,6 +759,9 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
*/
lvts_ctrl[i].hw_tshut_raw_temp =
lvts_temp_to_raw(lvts_data->lvts_ctrl[i].hw_tshut_temp);
+
+ lvts_ctrl[i].low_thresh = INT_MIN;
+ lvts_ctrl[i].high_thresh = INT_MIN;
}
/*
@@ -897,24 +971,6 @@ static int lvts_ctrl_configure(struct device *dev, struct lvts_ctrl *lvts_ctrl)
writel(value, LVTS_MSRCTL0(lvts_ctrl->base));
/*
- * LVTS_MSRCTL1 : Measurement control
- *
- * Bits:
- *
- * 9: Ignore MSRCTL0 config and do immediate measurement on sensor3
- * 6: Ignore MSRCTL0 config and do immediate measurement on sensor2
- * 5: Ignore MSRCTL0 config and do immediate measurement on sensor1
- * 4: Ignore MSRCTL0 config and do immediate measurement on sensor0
- *
- * That configuration will ignore the filtering and the delays
- * introduced below in MONCTL1 and MONCTL2
- */
- if (lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE) {
- value = BIT(9) | BIT(6) | BIT(5) | BIT(4);
- writel(value, LVTS_MSRCTL1(lvts_ctrl->base));
- }
-
- /*
* LVTS_MONCTL1 : Period unit and group interval configuration
*
* The clock source of LVTS thermal controller is 26MHz.
@@ -979,6 +1035,15 @@ static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
struct thermal_zone_device *tz;
u32 sensor_map = 0;
int i;
+ /*
+ * Bitmaps to enable each sensor on immediate and filtered modes, as
+ * described in MSRCTL1 and MONCTL0 registers below, respectively.
+ */
+ u32 sensor_imm_bitmap[] = { BIT(4), BIT(5), BIT(6), BIT(9) };
+ u32 sensor_filt_bitmap[] = { BIT(0), BIT(1), BIT(2), BIT(3) };
+
+ u32 *sensor_bitmap = lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE ?
+ sensor_imm_bitmap : sensor_filt_bitmap;
for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++) {
@@ -1016,20 +1081,38 @@ static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
* map, so we can enable the temperature monitoring in
* the hardware thermal controller.
*/
- sensor_map |= BIT(i);
+ sensor_map |= sensor_bitmap[i];
}
/*
- * Bits:
- * 9: Single point access flow
- * 0-3: Enable sensing point 0-3
- *
* The initialization of the thermal zones gives us
* which sensor point to enable. If any thermal zone
* was not described in the device tree, it won't be
* enabled here in the sensor map.
*/
- writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
+ if (lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE) {
+ /*
+ * LVTS_MSRCTL1 : Measurement control
+ *
+ * Bits:
+ *
+ * 9: Ignore MSRCTL0 config and do immediate measurement on sensor3
+ * 6: Ignore MSRCTL0 config and do immediate measurement on sensor2
+ * 5: Ignore MSRCTL0 config and do immediate measurement on sensor1
+ * 4: Ignore MSRCTL0 config and do immediate measurement on sensor0
+ *
+ * That configuration will ignore the filtering and the delays
+ * introduced in MONCTL1 and MONCTL2
+ */
+ writel(sensor_map, LVTS_MSRCTL1(lvts_ctrl->base));
+ } else {
+ /*
+ * Bits:
+ * 9: Single point access flow
+ * 0-3: Enable sensing point 0-3
+ */
+ writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
+ }
return 0;
}
@@ -1138,7 +1221,7 @@ static int lvts_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return dev_err_probe(dev, irq, "No irq resource\n");
+ return irq;
ret = lvts_domain_init(dev, lvts_td, lvts_data);
if (ret)
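A standalone sketch of the per-sensor interrupt masking added above in lvts_update_irq_mask(): only the sensors whose low/high thresholds match the controller-wide thresholds keep their MONINT bits set, so the hardware fires for the tightest window currently programmed. The offsets are copied from the LVTS_MONINT_OFFSET_SENSORx defines; the threshold values are illustrative.

#include <stdio.h>

static const unsigned int monint_offset[] = { 0xC, 0x180, 0x3000, 0x3000000 };

struct sensor { int low, high; };

/* Mirror of lvts_update_irq_mask(). */
static unsigned int update_irq_mask(unsigned int monint,
				    const struct sensor *s,
				    int ctrl_low, int ctrl_high)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (s[i].high == ctrl_high && s[i].low == ctrl_low)
			monint |= monint_offset[i];
		else
			monint &= ~monint_offset[i];
	}
	return monint;
}

int main(void)
{
	struct sensor s[4] = {
		{ 20000, 85000 }, { 20000, 85000 },
		{ 20000, 65000 }, { 20000, 85000 },
	};

	/* Sensors 0, 1 and 3 share the controller thresholds, so only
	 * their MONINT bits stay set. */
	printf("MONINT mask: 0x%x\n", update_irq_mask(0, s, 20000, 85000));
	return 0;
}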
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index a941b4241b0a..87c09f62ee81 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -23,7 +23,7 @@
#define BIT_APPEND 0x3
-struct tsens_legacy_calibration_format tsens_8916_nvmem = {
+static struct tsens_legacy_calibration_format tsens_8916_nvmem = {
.base_len = 7,
.base_shift = 3,
.sp_len = 5,
@@ -39,7 +39,7 @@ struct tsens_legacy_calibration_format tsens_8916_nvmem = {
},
};
-struct tsens_legacy_calibration_format tsens_8974_nvmem = {
+static struct tsens_legacy_calibration_format tsens_8974_nvmem = {
.base_len = 8,
.base_shift = 2,
.sp_len = 6,
@@ -61,7 +61,7 @@ struct tsens_legacy_calibration_format tsens_8974_nvmem = {
},
};
-struct tsens_legacy_calibration_format tsens_8974_backup_nvmem = {
+static struct tsens_legacy_calibration_format tsens_8974_backup_nvmem = {
.base_len = 8,
.base_shift = 2,
.sp_len = 6,
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index 51322430f1fe..dc1c4ae2d8b0 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -21,7 +21,7 @@
#define TM_HIGH_LOW_INT_STATUS_OFF 0x0088
#define TM_HIGH_LOW_Sn_INT_THRESHOLD_OFF 0x0090
-struct tsens_legacy_calibration_format tsens_qcs404_nvmem = {
+static struct tsens_legacy_calibration_format tsens_qcs404_nvmem = {
.base_len = 8,
.base_shift = 2,
.sp_len = 6,
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 58f4d8f7a3fd..e5bc2c82010f 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -887,7 +887,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
return -EADDRNOTAVAIL;
}
- data->soc = (enum soc_type)of_device_get_match_data(&pdev->dev);
+ data->soc = (uintptr_t)of_device_get_match_data(&pdev->dev);
switch (data->soc) {
case SOC_ARCH_EXYNOS4210:
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 6e78616a576e..96d99289799a 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -122,8 +122,8 @@ static int spear_thermal_probe(struct platform_device *pdev)
stdev->flags = val;
writel_relaxed(stdev->flags, stdev->thermal_base);
- spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0,
- stdev, &ops, NULL, 0, 0);
+ spear_thermal = thermal_tripless_zone_device_register("spear_thermal",
+ stdev, &ops, NULL);
if (IS_ERR(spear_thermal)) {
dev_err(&pdev->dev, "thermal zone device is NULL\n");
ret = PTR_ERR(spear_thermal);
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index cca16d632d9f..f989b55a8aa8 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -56,8 +56,6 @@
#define SUN50I_H6_THS_PC_TEMP_PERIOD(x) ((GENMASK(19, 0) & (x)) << 12)
#define SUN50I_H6_THS_DATA_IRQ_STS(x) BIT(x)
-/* millidegree celsius */
-
struct tsensor {
struct ths_device *tmdev;
struct thermal_zone_device *tzd;
@@ -286,7 +284,7 @@ static int sun8i_ths_calibrate(struct ths_device *tmdev)
size_t callen;
int ret = 0;
- calcell = devm_nvmem_cell_get(dev, "calibration");
+ calcell = nvmem_cell_get(dev, "calibration");
if (IS_ERR(calcell)) {
if (PTR_ERR(calcell) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -316,6 +314,8 @@ static int sun8i_ths_calibrate(struct ths_device *tmdev)
kfree(caldata);
out:
+ if (!IS_ERR(calcell))
+ nvmem_cell_put(calcell);
return ret;
}
@@ -489,8 +489,6 @@ static int sun8i_ths_probe(struct platform_device *pdev)
if (!tmdev->chip)
return -EINVAL;
- platform_set_drvdata(pdev, tmdev);
-
ret = sun8i_ths_resource_init(tmdev);
if (ret)
return ret;
diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c
index a2879d624945..4ffc3bb3bf35 100644
--- a/drivers/thermal/tegra/tegra-bpmp-thermal.c
+++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c
@@ -167,19 +167,69 @@ static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp,
return 0;
}
+static int tegra_bpmp_thermal_trips_supported(struct tegra_bpmp *bpmp, bool *supported)
+{
+ struct mrq_thermal_host_to_bpmp_request req;
+ union mrq_thermal_bpmp_to_host_response reply;
+ struct tegra_bpmp_message msg;
+ int err;
+
+ memset(&req, 0, sizeof(req));
+ req.type = CMD_THERMAL_QUERY_ABI;
+ req.query_abi.type = CMD_THERMAL_SET_TRIP;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_THERMAL;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &reply;
+ msg.rx.size = sizeof(reply);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+
+ if (msg.rx.ret == 0) {
+ *supported = true;
+ return 0;
+ } else if (msg.rx.ret == -BPMP_ENODEV) {
+ *supported = false;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
static const struct thermal_zone_device_ops tegra_bpmp_of_thermal_ops = {
.get_temp = tegra_bpmp_thermal_get_temp,
.set_trips = tegra_bpmp_thermal_set_trips,
};
+static const struct thermal_zone_device_ops tegra_bpmp_of_thermal_ops_notrips = {
+ .get_temp = tegra_bpmp_thermal_get_temp,
+};
+
static int tegra_bpmp_thermal_probe(struct platform_device *pdev)
{
struct tegra_bpmp *bpmp = dev_get_drvdata(pdev->dev.parent);
+ const struct thermal_zone_device_ops *thermal_ops;
struct tegra_bpmp_thermal *tegra;
struct thermal_zone_device *tzd;
unsigned int i, max_num_zones;
+ bool supported;
int err;
+ err = tegra_bpmp_thermal_trips_supported(bpmp, &supported);
+ if (err) {
+ dev_err(&pdev->dev, "failed to determine if trip points are supported\n");
+ return err;
+ }
+
+ if (supported)
+ thermal_ops = &tegra_bpmp_of_thermal_ops;
+ else
+ thermal_ops = &tegra_bpmp_of_thermal_ops_notrips;
+
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
@@ -222,7 +272,7 @@ static int tegra_bpmp_thermal_probe(struct platform_device *pdev)
}
tzd = devm_thermal_of_zone_register(
- &pdev->dev, i, zone, &tegra_bpmp_of_thermal_ops);
+ &pdev->dev, i, zone, thermal_ops);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index f4f1a04f8c0f..1717e4a19dcb 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -142,7 +142,6 @@ static int gadc_thermal_probe(struct platform_device *pdev)
return ret;
gti->dev = &pdev->dev;
- platform_set_drvdata(pdev, gti);
gti->tz_dev = devm_thermal_of_zone_register(&pdev->dev, 0, gti,
&gadc_thermal_ops);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index a59700593d32..58533ea75cd9 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -348,12 +348,14 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip_id)
struct thermal_trip trip;
/* Ignore disabled trip points */
- if (test_bit(trip_id, &tz->trips_disabled) ||
- trip.temperature == THERMAL_TEMP_INVALID)
+ if (test_bit(trip_id, &tz->trips_disabled))
return;
__thermal_zone_get_trip(tz, trip_id, &trip);
+ if (trip.temperature == THERMAL_TEMP_INVALID)
+ return;
+
if (tz->last_temperature != THERMAL_TEMP_INVALID) {
if (tz->last_temperature < trip.temperature &&
tz->temperature >= trip.temperature)
@@ -1266,7 +1268,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- if (num_trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp) && !trips)
+ if (num_trips > 0 && !trips)
return ERR_PTR(-EINVAL);
if (!thermal_class)
@@ -1389,16 +1391,16 @@ free_tz:
}
EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
-struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
- void *devdata, struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp, int passive_delay,
- int polling_delay)
+struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp)
{
- return thermal_zone_device_register_with_trips(type, NULL, ntrips, mask,
- devdata, ops, tzp,
- passive_delay, polling_delay);
+ return thermal_zone_device_register_with_trips(type, NULL, 0, 0, devdata,
+ ops, tzp, 0, 0);
}
-EXPORT_SYMBOL_GPL(thermal_zone_device_register);
+EXPORT_SYMBOL_GPL(thermal_tripless_zone_device_register);
void *thermal_zone_device_priv(struct thermal_zone_device *tzd)
{
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 04513f9fbfa1..de884bea28b6 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -70,7 +70,7 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
void thermal_cdev_update(struct thermal_cooling_device *);
void __thermal_cdev_update(struct thermal_cooling_device *cdev);
-int get_tz_trend(struct thermal_zone_device *tz, int trip);
+int get_tz_trend(struct thermal_zone_device *tz, int trip_index);
struct thermal_instance *
get_thermal_instance(struct thermal_zone_device *tz,
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index cfba0965a22d..4d66372c9629 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -22,8 +22,9 @@
#include "thermal_core.h"
#include "thermal_trace.h"
-int get_tz_trend(struct thermal_zone_device *tz, int trip)
+int get_tz_trend(struct thermal_zone_device *tz, int trip_index)
{
+ struct thermal_trip *trip = tz->trips ? &tz->trips[trip_index] : NULL;
enum thermal_trend trend;
if (tz->emul_temperature || !tz->ops->get_trend ||
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 4ca905723429..1e0655b63259 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -37,8 +37,10 @@ static int of_find_trip_id(struct device_node *np, struct device_node *trip)
*/
for_each_child_of_node(trips, t) {
- if (t == trip)
+ if (t == trip) {
+ of_node_put(t);
goto out;
+ }
i++;
}
@@ -401,8 +403,10 @@ static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz,
for_each_child_of_node(cm_np, child) {
ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
of_node_put(cm_np);
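
Both hunks above follow the same rule: for_each_child_of_node() holds a reference on the node it hands to the loop body and only drops it when advancing, so any early exit has to call of_node_put() itself. A small sketch of the pattern, using an illustrative helper:

#include <linux/errno.h>
#include <linux/of.h>

/* Count children, bailing out if any child is marked disabled. */
static int foo_count_enabled_children(struct device_node *parent)
{
	struct device_node *child;
	int n = 0;

	for_each_child_of_node(parent, child) {
		if (!of_device_is_available(child)) {
			of_node_put(child);	/* early exit: drop the reference still held */
			return -ENODEV;
		}
		n++;
	}

	return n;
}
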
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 6c20c9f90a05..4e6a97db894e 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -185,9 +185,6 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
- if (kstrtoint(buf, 10, &trip.hysteresis))
- return -EINVAL;
-
mutex_lock(&tz->lock);
if (!device_is_registered(dev)) {
@@ -198,7 +195,11 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
ret = __thermal_zone_get_trip(tz, trip_id, &trip);
if (ret)
goto unlock;
-
+
+ ret = kstrtoint(buf, 10, &trip.hysteresis);
+ if (ret)
+ goto unlock;
+
ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index 53115cfdfd42..024e2e365a26 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -101,29 +101,11 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip)
{
- int ret;
-
- if (!tz || trip_id < 0 || trip_id >= tz->num_trips || !trip)
+ if (!tz || !tz->trips || trip_id < 0 || trip_id >= tz->num_trips || !trip)
return -EINVAL;
- if (tz->trips) {
- *trip = tz->trips[trip_id];
- return 0;
- }
-
- if (tz->ops->get_trip_hyst) {
- ret = tz->ops->get_trip_hyst(tz, trip_id, &trip->hysteresis);
- if (ret)
- return ret;
- } else {
- trip->hysteresis = 0;
- }
-
- ret = tz->ops->get_trip_temp(tz, trip_id, &trip->temperature);
- if (ret)
- return ret;
-
- return tz->ops->get_trip_type(tz, trip_id, &trip->type);
+ *trip = tz->trips[trip_id];
+ return 0;
}
EXPORT_SYMBOL_GPL(__thermal_zone_get_trip);
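
With the get_trip_type/get_trip_temp/get_trip_hyst fallback gone, trip points must be described to the core at registration time; __thermal_zone_get_trip() is then just a bounds-checked copy out of that table. A sketch of what a caller now provides, with hypothetical values (the signature matches the one shown earlier in this patch):

#include <linux/kernel.h>
#include <linux/thermal.h>

static struct thermal_trip foo_trips[] = {
	{ .type = THERMAL_TRIP_PASSIVE,  .temperature = 85000,  .hysteresis = 2000 },
	{ .type = THERMAL_TRIP_CRITICAL, .temperature = 105000 },
};

static struct thermal_zone_device *foo_register_zone(void *priv,
						     struct thermal_zone_device_ops *ops)
{
	/* trips, num_trips, writable-trip mask, devdata, ops, tzp, passive/polling delays */
	return thermal_zone_device_register_with_trips("foo_thermal",
						       foo_trips, ARRAY_SIZE(foo_trips),
						       0, priv, ops, NULL, 0, 1000);
}
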
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index a1c9a1530183..0c2eb9c6e58b 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -314,7 +314,7 @@ int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t)
*/
static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id)
{
- if (!bgp || IS_ERR(bgp)) {
+ if (IS_ERR_OR_NULL(bgp)) {
pr_err("%s: invalid bandgap pointer\n", __func__);
return -EINVAL;
}
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index d414a4b7a94a..0cf0826b805a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -109,7 +109,9 @@ static inline int __ti_thermal_get_temp(struct thermal_zone_device *tz, int *tem
return ret;
}
-static int __ti_thermal_get_trend(struct thermal_zone_device *tz, int trip, enum thermal_trend *trend)
+static int __ti_thermal_get_trend(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ enum thermal_trend *trend)
{
struct ti_thermal_data *data = thermal_zone_device_priv(tz);
struct ti_bandgap *bgp;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index b3550ff9c494..1f3aba607cd5 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -3097,10 +3097,8 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
gsm->has_devices = false;
}
for (i = NUM_DLCI - 1; i >= 0; i--)
- if (gsm->dlci[i]) {
+ if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
- gsm->dlci[i] = NULL;
- }
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fb891b67968f..141627370aab 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1936,7 +1936,10 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
skip_rx = true;
if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
- if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+ struct irq_data *d;
+
+ d = irq_get_irq_data(port->irq);
+ if (d && irqd_is_wakeup_set(d))
pm_wakeup_event(tport->tty->dev, 0);
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
diff --git a/drivers/ufs/core/Kconfig b/drivers/ufs/core/Kconfig
index e11978171403..817208ee64ec 100644
--- a/drivers/ufs/core/Kconfig
+++ b/drivers/ufs/core/Kconfig
@@ -35,14 +35,6 @@ config SCSI_UFS_CRYPTO
capabilities of the UFS device (if present) to perform crypto
operations on data being transferred to/from the device.
-config SCSI_UFS_HPB
- bool "Support UFS Host Performance Booster"
- help
- The UFS HPB feature improves random read performance. It caches
- L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB
- read command by piggybacking physical page number for bypassing FTL (flash
- translation layer)'s L2P address translation.
-
config SCSI_UFS_FAULT_INJECTION
bool "UFS Fault Injection Support"
depends on FAULT_INJECTION
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index 4d02e0f2de10..cf820fa09a04 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -5,6 +5,5 @@ ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
diff --git a/drivers/ufs/core/ufs-hwmon.c b/drivers/ufs/core/ufs-hwmon.c
index 101d7082446f..34194064367f 100644
--- a/drivers/ufs/core/ufs-hwmon.c
+++ b/drivers/ufs/core/ufs-hwmon.c
@@ -127,7 +127,8 @@ static int ufs_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32
return err;
}
-static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
+static umode_t ufs_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr,
int channel)
{
if (type != hwmon_temp)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 386674ead7f0..2ba8ec254dce 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -97,6 +97,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
/**
* ufshcd_mcq_req_to_hwq - find the hardware queue on which the
@@ -104,7 +105,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
* @hba: per adapter instance
* @req: pointer to the request to be issued
*
- * Returns the hardware queue instance on which the request would
+ * Return: the hardware queue instance on which the request would
* be queued.
*/
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
@@ -120,7 +121,7 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
* ufshcd_mcq_decide_queue_depth - decide the queue depth
* @hba: per adapter instance
*
- * Returns queue-depth on success, non-zero on error
+ * Return: queue-depth on success, non-zero on error
*
* MAC - Max. Active Command of the Host Controller (HC)
* HC wouldn't send more than this commands to the device.
@@ -245,6 +246,7 @@ u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
@@ -388,6 +390,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
MCQ_CFG_n(REG_SQATTR, i));
}
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
@@ -487,10 +490,10 @@ static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
/**
* ufshcd_mcq_sq_cleanup - Clean up submission queue resources
* associated with the pending command.
- * @hba - per adapter instance.
- * @task_tag - The command's task tag.
+ * @hba: per adapter instance.
+ * @task_tag: The command's task tag.
*
- * Returns 0 for success; error code otherwise.
+ * Return: 0 for success; error code otherwise.
*/
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
@@ -551,16 +554,11 @@ unlock:
* Write the sqe's Command Type to 0xF. The host controller will not
* fetch any sqe with Command Type = 0xF.
*
- * @utrd - UTP Transfer Request Descriptor to be nullified.
+ * @utrd: UTP Transfer Request Descriptor to be nullified.
*/
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
- u32 dword_0;
-
- dword_0 = le32_to_cpu(utrd->header.dword_0);
- dword_0 &= ~UPIU_COMMAND_TYPE_MASK;
- dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);
- utrd->header.dword_0 = cpu_to_le32(dword_0);
+ utrd->header.command_type = 0xf;
}
/**
@@ -568,11 +566,11 @@ static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
* If the command is in the submission queue and not issued to the device yet,
* nullify the sqe so the host controller will skip fetching the sqe.
*
- * @hba - per adapter instance.
- * @hwq - Hardware Queue to be searched.
- * @task_tag - The command's task tag.
+ * @hba: per adapter instance.
+ * @hwq: Hardware Queue to be searched.
+ * @task_tag: The command's task tag.
*
- * Returns true if the SQE containing the command is present in the SQ
+ * Return: true if the SQE containing the command is present in the SQ
* (not fetched by the controller); returns false if the SQE is not in the SQ.
*/
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
@@ -621,9 +619,9 @@ out:
/**
* ufshcd_mcq_abort - Abort the command in MCQ.
- * @cmd - The command to be aborted.
+ * @cmd: The command to be aborted.
*
- * Returns SUCCESS or FAILED error codes
+ * Return: SUCCESS or FAILED error codes
*/
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 6c72075750dd..c95906443d5f 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -718,8 +718,6 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
-UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2);
-UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
@@ -752,8 +750,6 @@ static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_number_of_secure_wpa.attr,
&dev_attr_psa_max_data_size.attr,
&dev_attr_psa_state_timeout.attr,
- &dev_attr_hpb_version.attr,
- &dev_attr_hpb_control.attr,
&dev_attr_ext_feature_sup.attr,
&dev_attr_wb_presv_us_en.attr,
&dev_attr_wb_type.attr,
@@ -827,10 +823,6 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
_ENM4_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1);
-UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
@@ -868,10 +860,6 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
&dev_attr_enh4_memory_max_alloc_units.attr,
&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
- &dev_attr_hpb_region_size.attr,
- &dev_attr_hpb_number_lu.attr,
- &dev_attr_hpb_subregion_size.attr,
- &dev_attr_hpb_max_active_regions.attr,
&dev_attr_wb_max_alloc_units.attr,
&dev_attr_wb_max_wb_luns.attr,
&dev_attr_wb_buff_cap_adj.attr,
@@ -1132,7 +1120,6 @@ UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
UFS_FLAG(wb_enable, _WB_EN);
UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
-UFS_FLAG(hpb_enable, _HPB_EN);
static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_device_init.attr,
@@ -1146,7 +1133,6 @@ static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_wb_enable.attr,
&dev_attr_wb_flush_en.attr,
&dev_attr_wb_flush_during_h8.attr,
- &dev_attr_hpb_enable.attr,
NULL,
};
@@ -1193,7 +1179,6 @@ out: \
static DEVICE_ATTR_RO(_name)
UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
-UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
@@ -1217,7 +1202,6 @@ UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
- &dev_attr_max_data_size_hpb_single_cmd.attr,
&dev_attr_current_power_mode.attr,
&dev_attr_active_icc_level.attr,
&dev_attr_ooo_data_enabled.attr,
@@ -1291,9 +1275,6 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
-UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2);
-UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2);
-UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
static struct attribute *ufs_sysfs_unit_descriptor[] = {
@@ -1311,9 +1292,6 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_physical_memory_resourse_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
- &dev_attr_hpb_lu_max_active_regions.attr,
- &dev_attr_hpb_pinned_region_start_offset.attr,
- &dev_attr_hpb_number_pinned_regions.attr,
&dev_attr_wb_buf_alloc_units.attr,
NULL,
};
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 0d38e7fa34cc..374e5aae4e7e 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -76,8 +76,7 @@ static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *j
int ret;
int data_len;
- if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
- !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
+ if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en)
return -EINVAL;
if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
@@ -232,6 +231,8 @@ static inline void ufs_bsg_node_release(struct device *dev)
* @hba: per adapter object
*
* Called during initial loading of the driver, and before scsi_scan_host.
+ *
+ * Return: 0 (success).
*/
int ufs_bsg_probe(struct ufs_hba *hba)
{
diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
index 504cc841540b..be8596f20ba2 100644
--- a/drivers/ufs/core/ufshcd-crypto.h
+++ b/drivers/ufs/core/ufshcd-crypto.h
@@ -26,15 +26,15 @@ static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
}
static inline void
-ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
- u32 *dword_1, u32 *dword_3)
+ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
+ struct request_desc_header *h)
{
- if (lrbp->crypto_key_slot >= 0) {
- *dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD;
- *dword_0 |= lrbp->crypto_key_slot;
- *dword_1 = lower_32_bits(lrbp->data_unit_num);
- *dword_3 = upper_32_bits(lrbp->data_unit_num);
- }
+ if (lrbp->crypto_key_slot < 0)
+ return;
+ h->enable_crypto = 1;
+ h->cci = lrbp->crypto_key_slot;
+ h->dunl = cpu_to_le32(lower_32_bits(lrbp->data_unit_num));
+ h->dunu = cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
}
bool ufshcd_crypto_enable(struct ufs_hba *hba);
@@ -51,8 +51,8 @@ static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
struct ufshcd_lrb *lrbp) { }
static inline void
-ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
- u32 *dword_1, u32 *dword_3) { }
+ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
+ struct request_desc_header *h) { }
static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 0f3bd943b58b..f42d99ce5bf1 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -93,7 +93,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
- int msgcode,
+ enum upiu_request_transaction msgcode,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op);
@@ -294,7 +294,7 @@ extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
* ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
* @scsi_lun: scsi LUN id
*
- * Returns UPIU LUN id
+ * Return: UPIU LUN id
*/
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 129446775796..c2df07545f96 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
+#include <linux/iopoll.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
@@ -34,7 +35,6 @@
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
-#include "ufshpb.h"
#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
@@ -238,8 +238,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
/* UFS cards deviations table */
{ .wmanufacturerid = UFS_VENDOR_MICRON,
.model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
- UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
+ .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
@@ -703,8 +702,7 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us);
* @interval_us: polling interval in microseconds
* @timeout_ms: timeout in milliseconds
*
- * Return:
- * -ETIMEDOUT on error, zero on success.
+ * Return: -ETIMEDOUT on error, zero on success.
*/
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
@@ -732,7 +730,7 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
* ufshcd_get_intr_mask - Get the interrupt bit mask
* @hba: Pointer to adapter instance
*
- * Returns interrupt bit mask per version
+ * Return: interrupt bit mask per version
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
@@ -748,7 +746,7 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
* ufshcd_get_ufs_version - Get the UFS version supported by the HBA
* @hba: Pointer to adapter instance
*
- * Returns UFSHCI version supported by the controller
+ * Return: UFSHCI version supported by the controller
*/
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
@@ -775,7 +773,7 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
* the host controller
* @hba: pointer to adapter instance
*
- * Returns true if device present, false if no device detected
+ * Return: true if device present, false if no device detected
*/
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
@@ -788,7 +786,8 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
* @cqe: pointer to the completion queue entry
*
* This function is used to get the OCS field from UTRD
- * Returns the OCS field in the UTRD
+ *
+ * Return: the OCS field in the UTRD.
*/
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
struct cq_entry *cqe)
@@ -796,7 +795,7 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
if (cqe)
return le32_to_cpu(cqe->status) & MASK_OCS;
- return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
+ return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
/**
@@ -841,7 +840,7 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
- * Returns integer, 0 on Success and positive value if failed
+ * Return: 0 on success; a positive value if failed.
*/
static inline int ufshcd_get_lists_status(u32 reg)
{
@@ -853,7 +852,8 @@ static inline int ufshcd_get_lists_status(u32 reg)
* @hba: Pointer to adapter instance
*
* This function gets the result of UIC command completion
- * Returns 0 on success, non zero value on error
+ *
+ * Return: 0 on success; non-zero value on error.
*/
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
@@ -866,7 +866,8 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
* @hba: Pointer to adapter instance
*
* This function gets UIC command argument3
- * Returns 0 on success, non zero value on error
+ *
+ * Return: 0 on success; non-zero value on error.
*/
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
@@ -876,38 +877,13 @@ static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
/**
* ufshcd_get_req_rsp - returns the TR response transaction type
* @ucd_rsp_ptr: pointer to response UPIU
- */
-static inline int
-ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
-}
-
-/**
- * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
- *
- * This function gets the response status and scsi_status from response UPIU
- * Returns the response result code.
- */
-static inline int
-ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
-}
-
-/*
- * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
- * from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
*
- * Return the data segment length.
+ * Return: UPIU type.
*/
-static inline unsigned int
-ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+static inline enum upiu_response_transaction
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
- MASK_RSP_UPIU_DATA_SEG_LEN;
+ return ucd_rsp_ptr->header.transaction_code;
}
/**
@@ -917,12 +893,11 @@ ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
* The function checks if the device raised an exception event indicated in
* the Device Information field of response UPIU.
*
- * Returns true if exception is raised, false otherwise.
+ * Return: true if exception is raised, false otherwise.
*/
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
- MASK_RSP_EXCEPTION_EVENT;
+ return ucd_rsp_ptr->header.device_information & 1;
}
/**
@@ -993,12 +968,13 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba)
* ufshcd_is_hba_active - Get controller state
* @hba: per adapter instance
*
- * Returns true if and only if the controller is active.
+ * Return: true if and only if the controller is active.
*/
-static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
+bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
+EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
@@ -1029,8 +1005,7 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
* @hba: per adapter instance
* @scale_up: If True, set max possible frequency othewise set low frequency
*
- * Returns 0 if successful
- * Returns < 0 for any other errors
+ * Return: 0 if successful; < 0 upon failure.
*/
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
@@ -1092,8 +1067,7 @@ out:
* @hba: per adapter instance
* @scale_up: True if scaling up and false if scaling down
*
- * Returns 0 if successful
- * Returns < 0 for any other errors
+ * Return: 0 if successful; < 0 upon failure.
*/
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
@@ -1124,7 +1098,7 @@ out:
* @hba: per adapter instance
* @scale_up: True if scaling up and false if scaling down
*
- * Returns true if scaling is required, false otherwise.
+ * Return: true if scaling is required, false otherwise.
*/
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
bool scale_up)
@@ -1241,9 +1215,8 @@ out:
* @hba: per adapter instance
* @scale_up: True for scaling up gear and false for scaling down
*
- * Returns 0 for success,
- * Returns -EBUSY if scaling can't happen at this time
- * Returns non-zero for any other errors
+ * Return: 0 for success; -EBUSY if scaling can't happen at this time;
+ * non-zero for any other errors.
*/
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
@@ -1333,9 +1306,8 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
* @hba: per adapter instance
* @scale_up: True for scaling up and false for scalin down
*
- * Returns 0 for success,
- * Returns -EBUSY if scaling can't happen at this time
- * Returns non-zero for any other errors
+ * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
+ * for any other errors.
*/
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
@@ -2225,10 +2197,11 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
u8 *const sense_buffer = lrbp->cmd->sense_buffer;
+ u16 resp_len;
int len;
- if (sense_buffer &&
- ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
+ if (sense_buffer && resp_len) {
int len_to_copy;
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
@@ -2244,6 +2217,8 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
* descriptor
* @hba: per adapter instance
* @lrbp: pointer to local reference block
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
@@ -2261,8 +2236,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
u16 buf_len;
/* data segment length */
- resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
- MASK_QUERY_DATA_SEG_LEN;
+ resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
+ .data_segment_length);
buf_len = be16_to_cpu(
hba->dev_cmd.query.request.upiu_req.length);
if (likely(buf_len >= resp_len)) {
@@ -2320,11 +2295,16 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
* ufshcd_ready_for_uic_cmd - Check if controller is ready
* to accept UIC commands
* @hba: per adapter instance
- * Return true on success, else false
+ *
+ * Return: true on success, else false.
*/
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
- return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
+ u32 val;
+ int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
+ 500, UIC_CMD_TIMEOUT * 1000, false, hba,
+ REG_CONTROLLER_STATUS);
+ return ret == 0 ? true : false;
}
/**
@@ -2332,7 +2312,8 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
* @hba: Pointer to adapter instance
*
* This function gets the UPMCRS field of HCS register
- * Returns value of UPMCRS field
+ *
+ * Return: value of UPMCRS field.
*/
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
@@ -2370,7 +2351,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Returns 0 only if success.
+ * Return: 0 only if success.
*/
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
@@ -2409,14 +2390,13 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @uic_cmd: UIC command
* @completion: initialize the completion only if this is set to true
*
- * Returns 0 only if success.
+ * Return: 0 only if success.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
bool completion)
{
lockdep_assert_held(&hba->uic_cmd_mutex);
- lockdep_assert_held(hba->host->host_lock);
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
@@ -2438,12 +2418,11 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Returns 0 only if success.
+ * Return: 0 only if success.
*/
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
- unsigned long flags;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
@@ -2452,9 +2431,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -2515,7 +2492,7 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*
- * Returns 0 in case of success, non-zero value in case of failure
+ * Return: 0 in case of success, non-zero value in case of failure.
*/
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
@@ -2584,10 +2561,10 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
enum dma_data_direction cmd_dir, int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
- u32 data_direction;
- u32 dword_0;
- u32 dword_1 = 0;
- u32 dword_3 = 0;
+ struct request_desc_header *h = &req_desc->header;
+ enum utp_data_direction data_direction;
+
+ *h = (typeof(*h)){ };
if (cmd_dir == DMA_FROM_DEVICE) {
data_direction = UTP_DEVICE_TO_HOST;
@@ -2600,25 +2577,22 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
*upiu_flags = UPIU_CMD_FLAGS_NONE;
}
- dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
- ehs_length << 8;
+ h->command_type = lrbp->command_type;
+ h->data_direction = data_direction;
+ h->ehs_length = ehs_length;
+
if (lrbp->intr_cmd)
- dword_0 |= UTP_REQ_DESC_INT_CMD;
+ h->interrupt = 1;
/* Prepare crypto related dwords */
- ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
+ ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
- /* Transfer request descriptor header fields */
- req_desc->header.dword_0 = cpu_to_le32(dword_0);
- req_desc->header.dword_1 = cpu_to_le32(dword_1);
/*
* assigning invalid value for command status. Controller
* updates OCS on command completion, with the command
* status
*/
- req_desc->header.dword_2 =
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
- req_desc->header.dword_3 = cpu_to_le32(dword_3);
+ h->ocs = OCS_INVALID_COMMAND_STATUS;
req_desc->prd_table_length = 0;
}
@@ -2636,15 +2610,13 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
- /* command descriptor fields */
- ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_COMMAND, upiu_flags,
- lrbp->lun, lrbp->task_tag);
- ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
- UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
-
- /* Total EHS length and Data segment length will be zero */
- ucd_req_ptr->header.dword_2 = 0;
+ ucd_req_ptr->header = (struct utp_upiu_header){
+ .transaction_code = UPIU_TRANSACTION_COMMAND,
+ .flags = upiu_flags,
+ .lun = lrbp->lun,
+ .task_tag = lrbp->task_tag,
+ .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
+ };
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
@@ -2669,18 +2641,19 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
u16 len = be16_to_cpu(query->request.upiu_req.length);
/* Query request header */
- ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
- lrbp->lun, lrbp->task_tag);
- ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
- 0, query->request.query_func, 0, 0);
-
- /* Data segment length only need for WRITE_DESC */
- if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
- ucd_req_ptr->header.dword_2 =
- UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
- else
- ucd_req_ptr->header.dword_2 = 0;
+ ucd_req_ptr->header = (struct utp_upiu_header){
+ .transaction_code = UPIU_TRANSACTION_QUERY_REQ,
+ .flags = upiu_flags,
+ .lun = lrbp->lun,
+ .task_tag = lrbp->task_tag,
+ .query_function = query->request.query_func,
+ /* Data segment length only need for WRITE_DESC */
+ .data_segment_length =
+ query->request.upiu_req.opcode ==
+ UPIU_QUERY_OPCODE_WRITE_DESC ?
+ cpu_to_be16(len) :
+ 0,
+ };
/* Copy the Query Request buffer as is */
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
@@ -2699,13 +2672,10 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
- /* command descriptor fields */
- ucd_req_ptr->header.dword_0 =
- UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
- /* clear rest of the fields of basic header */
- ucd_req_ptr->header.dword_1 = 0;
- ucd_req_ptr->header.dword_2 = 0;
+ ucd_req_ptr->header = (struct utp_upiu_header){
+ .transaction_code = UPIU_TRANSACTION_NOP_OUT,
+ .task_tag = lrbp->task_tag,
+ };
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
@@ -2715,6 +2685,8 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
* for Device Management Purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
@@ -2743,6 +2715,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
* for SCSI Purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
@@ -2768,7 +2742,7 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
* @upiu_wlun_id: UPIU W-LUN id
*
- * Returns SCSI W-LUN id
+ * Return: SCSI W-LUN id.
*/
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
@@ -2839,7 +2813,7 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
* @host: SCSI host pointer
* @cmd: command from SCSI Midlayer
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
@@ -2908,8 +2882,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->req_abort_skip = false;
- ufshpb_prep(hba, lrbp);
-
ufshcd_comp_scsi_upiu(hba, lrbp);
err = ufshcd_map_sg(hba, lrbp);
@@ -2952,7 +2924,7 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
* Check with the block layer if the command is inflight
* @cmd: command to check.
*
- * Returns true if command is inflight; false if not.
+ * Return: true if command is inflight; false if not.
*/
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
@@ -3007,26 +2979,17 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
mask, ~mask, 1000, 1000);
}
-static int
-ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
-
- /* Get the UPIU response */
- query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
- UPIU_RSP_CODE_OFFSET;
- return query_res->response;
-}
-
/**
* ufshcd_dev_cmd_completion() - handles device management command responses
* @hba: per adapter instance
* @lrbp: pointer to local reference block
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
- int resp;
+ enum upiu_response_transaction resp;
int err = 0;
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
@@ -3040,11 +3003,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
__func__, resp);
}
break;
- case UPIU_TRANSACTION_QUERY_RSP:
- err = ufshcd_check_query_response(hba, lrbp);
- if (!err)
+ case UPIU_TRANSACTION_QUERY_RSP: {
+ u8 response = lrbp->ucd_rsp_ptr->header.response;
+
+ if (response == 0)
err = ufshcd_copy_query_response(hba, lrbp);
break;
+ }
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
err = -EPERM;
@@ -3159,6 +3124,8 @@ retry:
* @cmd_type: specifies the type (NOP, Query...)
* @timeout: timeout in milliseconds
*
+ * Return: 0 upon success; < 0 upon failure.
+ *
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
*/
@@ -3250,7 +3217,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @index: flag index to access
* @flag_res: the flag value after the query request completes
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
@@ -3319,7 +3286,7 @@ out_unlock:
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
@@ -3384,7 +3351,7 @@ out_unlock:
* @attr_val: the attribute value after the query request
* completes
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
@@ -3482,9 +3449,10 @@ out_unlock:
* @desc_buf: the buffer that contains the descriptor
* @buf_len: length parameter passed to the device
*
- * Returns 0 for success, non-zero in case of failure.
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
+ *
+ * Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@@ -3514,7 +3482,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return 0 in case of success, non-zero otherwise
+ * Return: 0 in case of success, non-zero otherwise.
*/
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
@@ -3694,7 +3662,7 @@ out:
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return 0 in case of success, non-zero otherwise
+ * Return: 0 in case of success, non-zero otherwise.
*/
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
int lun,
@@ -3749,7 +3717,7 @@ static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
* (UTMRDL)
* 4. Allocate memory for local reference block(lrb).
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
@@ -3896,7 +3864,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
* Once the Unipro links are up, the device connected to the controller
* is detected.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
@@ -3918,7 +3886,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
* DME_RESET command is issued in order to reset UniPro stack.
* This function now deals with cold reset.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
@@ -3957,7 +3925,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
*
* DME_ENABLE command is issued in order to enable UniPro stack.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
@@ -4013,7 +3981,7 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
* @mib_val: setting value as uic command argument3
* @peer: indicate whether peer or local
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
u8 attr_set, u32 mib_val, u8 peer)
@@ -4057,7 +4025,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
* @mib_val: the value of the attribute as returned by the UIC command
* @peer: indicate whether peer or local
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
u32 *mib_val, u8 peer)
@@ -4138,7 +4106,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
* addition to normal UIC command completion Status (UCCS). This function only
* returns after the relevant status bits indicate the completion.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
@@ -4166,8 +4134,8 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
wmb();
reenable_intr = true;
}
- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ret = __ufshcd_send_uic_cmd(hba, cmd, false);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -4228,7 +4196,7 @@ out_unlock:
* @hba: per adapter instance
* @mode: powr mode value
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
@@ -4380,8 +4348,8 @@ static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
hba->pwr_info.gear_rx = UFS_PWM_G1;
hba->pwr_info.gear_tx = UFS_PWM_G1;
- hba->pwr_info.lane_rx = 1;
- hba->pwr_info.lane_tx = 1;
+ hba->pwr_info.lane_rx = UFS_LANE_1;
+ hba->pwr_info.lane_tx = UFS_LANE_1;
hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
hba->pwr_info.hs_rate = 0;
@@ -4390,6 +4358,8 @@ static void ufshcd_init_pwr_info(struct ufs_hba *hba)
/**
* ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
* @hba: per-adapter instance
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
@@ -4547,6 +4517,8 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
* ufshcd_config_pwr_mode - configure a new power mode
* @hba: per-adapter instance
* @desired_pwr_mode: desired power configuration
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode)
@@ -4571,6 +4543,8 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
* @hba: per-adapter instance
*
* Set fDeviceInit flag and poll until device toggles it.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
@@ -4621,7 +4595,7 @@ out:
* 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
@@ -4702,7 +4676,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
* sequence kicks off. When controller is ready it will set
* the Host Controller Enable bit to 1.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
@@ -4847,7 +4821,7 @@ EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
* ufshcd_link_startup - Initialize unipro link startup
* @hba: per adapter instance
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
static int ufshcd_link_startup(struct ufs_hba *hba)
{
@@ -4942,6 +4916,8 @@ out:
* If the UTP layer at the device side is not initialized, it may
* not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
* and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
@@ -5066,7 +5042,7 @@ set_qdepth:
* ufshcd_slave_alloc - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
- * Returns success
+ * Return: success.
*/
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
@@ -5102,43 +5078,25 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
* @depth: required depth to set
*
* Change queue depth and make sure the max. limits are not crossed.
+ *
+ * Return: new queue depth.
*/
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
-static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- /* skip well-known LU */
- if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
- !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
- return;
-
- ufshpb_destroy_lu(hba, sdev);
-}
-
-static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- /* skip well-known LU */
- if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
- !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
- return;
-
- ufshpb_init_hpb_lu(hba, sdev);
-}
-
/**
* ufshcd_slave_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
+ *
+ * Return: 0 (success).
*/
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
- ufshcd_hpb_configure(hba, sdev);
-
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
blk_queue_update_dma_alignment(q, SZ_4K - 1);
@@ -5173,8 +5131,6 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
hba = shost_priv(sdev->host);
- ufshcd_hpb_destroy(hba, sdev);
-
/* Drop the reference as it won't be needed anymore */
if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -5208,7 +5164,7 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
* @lrbp: pointer to local reference block of completed command
* @scsi_status: SCSI command status
*
- * Returns value base on SCSI command status
+ * Return: value based on SCSI command status.
*/
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
@@ -5242,7 +5198,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
* @lrbp: pointer to local reference block of completed command
* @cqe: pointer to the completion queue entry
*
- * Returns result of the command to notify SCSI midlayer
+ * Return: result of the command to notify SCSI midlayer.
*/
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
@@ -5251,36 +5207,37 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
int result = 0;
int scsi_status;
enum utp_ocs ocs;
+ u8 upiu_flags;
+ u32 resid;
- scsi_set_resid(lrbp->cmd,
- be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count));
+ upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
+ resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
+ /*
+ * Test !overflow instead of underflow to support UFS devices that do
+ * not set either flag.
+ */
+ if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
+ scsi_set_resid(lrbp->cmd, resid);
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp, cqe);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
- if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
- MASK_RSP_UPIU_RESULT)
+ if (lrbp->ucd_rsp_ptr->header.response ||
+ lrbp->ucd_rsp_ptr->header.status)
ocs = OCS_SUCCESS;
}
switch (ocs) {
case OCS_SUCCESS:
- result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
- switch (result) {
+ switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
case UPIU_TRANSACTION_RESPONSE:
/*
- * get the response UPIU result to extract
- * the SCSI command status
- */
- result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
-
- /*
* get the result based on SCSI status response
* to notify the SCSI midlayer of the command status
*/
- scsi_status = result & MASK_SCSI_STATUS;
+ scsi_status = lrbp->ucd_rsp_ptr->header.status;
result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
/*
@@ -5300,9 +5257,6 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
/* Flushed in suspend */
schedule_work(&hba->eeh_work);
-
- if (scsi_status == SAM_STAT_GOOD)
- ufshpb_rsp_upiu(hba, lrbp);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -5372,7 +5326,7 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -5448,8 +5402,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
if (hba->dev_cmd.complete) {
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
- lrbp->utr_descriptor_ptr->header.dword_2 =
- cpu_to_le32(ocs);
+ lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
complete(hba->dev_cmd.complete);
ufshcd_clk_scaling_update_busy(hba);
@@ -5492,7 +5445,7 @@ static void ufshcd_clear_polled(struct ufs_hba *hba,
}
/*
- * Returns > 0 if one or more commands have been completed or 0 if no
+ * Return: > 0 if one or more commands have been completed or 0 if no
* requests have been completed.
*/
static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
@@ -5582,7 +5535,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -5659,7 +5612,7 @@ int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
* Disables exception event in the device so that the EVENT_ALERT
* bit is not set.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
@@ -5674,7 +5627,7 @@ static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
* Enable corresponding exception event in the device to allow
* device to alert host in critical scenarios.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
@@ -5690,7 +5643,7 @@ static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
* as the device is allowed to manage its own way of handling background
* operations.
*
- * Returns zero on success, non-zero on failure.
+ * Return: zero on success, non-zero on failure.
*/
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
@@ -5729,7 +5682,7 @@ out:
* host is idle so that BKOPS are managed effectively without any negative
* impacts.
*
- * Returns zero on success, non-zero on failure.
+ * Return: zero on success, non-zero on failure.
*/
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
@@ -5805,7 +5758,7 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
* bkops_status is greater than or equal to "status" argument passed to
* this function, disable otherwise.
*
- * Returns 0 for success, non-zero in case of failure.
+ * Return: 0 for success, non-zero in case of failure.
*
* NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
* to know whether auto bkops is enabled or disabled after this function
@@ -5846,6 +5799,8 @@ out:
*
 * If BKOPs is enabled, this function returns 0, 1 if the bkops is not enabled
* and negative error value for any other failure.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
@@ -6157,7 +6112,7 @@ static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
* to recover from the DL NAC errors or not.
* @hba: per-adapter instance
*
- * Returns true if error handling is required, false otherwise
+ * Return: true if error handling is required, false otherwise.
*/
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
@@ -6384,54 +6339,48 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
return false;
}
+static bool ufshcd_abort_one(struct request *rq, void *priv)
+{
+ int *ret = priv;
+ u32 tag = rq->tag;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+
+ *ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ *ret ? "failed" : "succeeded");
+ return *ret == 0;
+}
+
+/**
+ * ufshcd_abort_all - Abort all pending commands.
+ * @hba: Host bus adapter pointer.
+ *
+ * Return: true if and only if the host controller needs to be reset.
+ */
static bool ufshcd_abort_all(struct ufs_hba *hba)
{
- bool needs_reset = false;
- int tag, ret;
+ int tag, ret = 0;
- if (is_mcq_enabled(hba)) {
- struct ufshcd_lrb *lrbp;
- int tag;
+ blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
+ if (ret)
+ goto out;
- for (tag = 0; tag < hba->nutrs; tag++) {
- lrbp = &hba->lrb[tag];
- if (!ufshcd_cmd_inflight(lrbp->cmd))
- continue;
- ret = ufshcd_try_to_abort_task(hba, tag);
- dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
- ret ? "failed" : "succeeded");
- if (ret) {
- needs_reset = true;
- goto out;
- }
- }
- } else {
- /* Clear pending transfer requests */
- for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
- ret = ufshcd_try_to_abort_task(hba, tag);
- dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
- ret ? "failed" : "succeeded");
- if (ret) {
- needs_reset = true;
- goto out;
- }
- }
- }
/* Clear pending task management requests */
for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
- if (ufshcd_clear_tm_cmd(hba, tag)) {
- needs_reset = true;
+ ret = ufshcd_clear_tm_cmd(hba, tag);
+ if (ret)
goto out;
- }
}
out:
/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba, false);
- return needs_reset;
+ return ret != 0;
}
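
/*
 * Editor's sketch (illustrative only, not part of the patch) of the
 * blk_mq_tagset_busy_iter() callback pattern that ufshcd_abort_all() now
 * relies on: the iterator invokes the callback for every in-flight request
 * in the tag set and stops as soon as the callback returns false. The helper
 * name and the counter are hypothetical; <linux/blk-mq.h> is assumed to be
 * available.
 */
static bool example_count_inflight(struct request *rq, void *priv)
{
	unsigned int *count = priv;	/* hypothetical counter supplied by the caller */

	(*count)++;
	return true;			/* returning false would stop the iteration early */
}

/* usage: blk_mq_tagset_busy_iter(&shost->tag_set, example_count_inflight, &count); */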
/**
@@ -6618,7 +6567,7 @@ skip_err_handling:
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6711,7 +6660,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
* @hba: per-adapter instance
* @intr_status: interrupt status generated by the controller
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6787,7 +6736,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6816,7 +6765,7 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
* ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
* @hba: per adapter instance
*
- * Returns IRQ_HANDLED if interrupt is handled
+ * Return: IRQ_HANDLED if interrupt is handled.
*/
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
{
@@ -6851,7 +6800,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6882,7 +6831,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6978,7 +6927,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
task_tag);
hba->tmf_rqs[req->tag] = req;
- treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
+ treq->upiu_req.req_header.task_tag = task_tag;
memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
@@ -7031,23 +6980,23 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* @tm_function: task management function opcode
* @tm_response: task management service response return value
*
- * Returns non-zero value on error, zero on success.
+ * Return: non-zero value on error, zero on success.
*/
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
u8 tm_function, u8 *tm_response)
{
- struct utp_task_req_desc treq = { { 0 }, };
+ struct utp_task_req_desc treq = { };
enum utp_ocs ocs_value;
int err;
/* Configure task request descriptor */
- treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ treq.header.interrupt = 1;
+ treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
/* Configure task request UPIU */
- treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
- cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
- treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
+ treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
+ treq.upiu_req.req_header.lun = lun_id;
+ treq.upiu_req.req_header.tm_function = tm_function;
/*
* The host shall provide the same value for LUN field in the basic
@@ -7060,7 +7009,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
if (err == -ETIMEDOUT)
return err;
- ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ ocs_value = treq.header.ocs & MASK_OCS;
if (ocs_value != OCS_SUCCESS)
dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
__func__, ocs_value);
@@ -7086,6 +7035,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
*
* Since there is only one available tag for device management commands,
* the caller is expected to hold the hba->dev_cmd.lock mutex.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
@@ -7119,7 +7070,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
/* update the task tag in the request upiu */
- req_upiu->header.dword_0 |= cpu_to_be32(tag);
+ req_upiu->header.task_tag = tag;
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
@@ -7152,8 +7103,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
- u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
- MASK_QUERY_DATA_SEG_LEN;
+ u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
+ .data_segment_length);
if (*buff_len >= resp_len) {
memcpy(desc_buff, descp, resp_len);
@@ -7187,19 +7138,21 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
* Management requests.
 * It is up to the caller to fill the upiu content properly, as it will
* be copied without any further input validations.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
- int msgcode,
+ enum upiu_request_transaction msgcode,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op)
{
int err;
enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
- struct utp_task_req_desc treq = { { 0 }, };
+ struct utp_task_req_desc treq = { };
enum utp_ocs ocs_value;
- u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
+ u8 tm_f = req_upiu->header.tm_function;
switch (msgcode) {
case UPIU_TRANSACTION_NOP_OUT:
@@ -7216,8 +7169,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
break;
case UPIU_TRANSACTION_TASK_REQ:
- treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ treq.header.interrupt = 1;
+ treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
@@ -7225,7 +7178,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
if (err == -ETIMEDOUT)
break;
- ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ ocs_value = treq.header.ocs & MASK_OCS;
if (ocs_value != OCS_SUCCESS) {
dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
ocs_value);
@@ -7255,7 +7208,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
* @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
* @dir: DMA direction
*
- * Returns zero on success, non-zero on failure
+ * Return: zero on success, non-zero on failure.
*/
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
@@ -7288,10 +7241,18 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
/* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
+ /*
+	 * According to the UFSHCI 4.0 specification, page 24: if EHSLUTRDS is 0, the host
+	 * controller takes the EHS length from the CMD UPIU, and the SW driver uses the EHS
+	 * Length field in the CMD UPIU. If it is 1, the HW controller takes the EHS length
+	 * from the UTRD.
+ */
+ if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
+ else
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
- /* update the task tag and LUN in the request upiu */
- req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
+ /* update the task tag */
+ req_upiu->header.task_tag = tag;
/* copy the UPIU(contains CDB) request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
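	/*
	 * Editor's note (hedged, not part of the patch): in isolation, the
	 * EHSLUTRD decision above amounts to
	 *
	 *	ehs_length = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;
	 *
	 * i.e. write 2 (in 32-byte units) into the UTRD only when the
	 * controller advertises that it takes the EHS length from the UTRD,
	 * otherwise write 0 and let the length in the CMD UPIU be used.
	 */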
@@ -7313,9 +7274,10 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
/* Just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
/* Get the response UPIU result */
- result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+ result = (lrbp->ucd_rsp_ptr->header.response << 8) |
+ lrbp->ucd_rsp_ptr->header.status;
- ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
+ ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
/*
* Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
* in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
@@ -7341,7 +7303,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
*
- * Returns SUCCESS/FAILED
+ * Return: SUCCESS or FAILED.
*/
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
@@ -7436,7 +7398,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
* issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
* really issued and then try to abort it.
*
- * Returns zero on success, non-zero on failure
+ * Return: zero on success, non-zero on failure.
*/
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
@@ -7524,7 +7486,7 @@ out:
* ufshcd_abort - scsi host template eh_abort_handler callback
* @cmd: SCSI command pointer
*
- * Returns SUCCESS/FAILED
+ * Return: SUCCESS or FAILED.
*/
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
@@ -7649,7 +7611,7 @@ release:
* local and remote (device) Uni-Pro stack and the attributes
* are reset to default state.
*
- * Returns zero on success, non-zero on failure
+ * Return: zero on success, non-zero on failure.
*/
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
@@ -7659,7 +7621,6 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
- ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba, true);
@@ -7687,7 +7648,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Reset and recover device, host and re-establish link. This
* is helpful to recover the communication in fatal error conditions.
*
- * Returns zero on success, non-zero on failure
+ * Return: zero on success, non-zero on failure.
*/
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
@@ -7745,7 +7706,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
* ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
* @cmd: SCSI command pointer
*
- * Returns SUCCESS/FAILED
+ * Return: SUCCESS or FAILED.
*/
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
@@ -7777,7 +7738,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
* @start_scan: row at the desc table to start scan from
* @buff: power descriptor buffer
*
- * Returns calculated max ICC level for specific regulator
+ * Return: calculated max ICC level for specific regulator.
*/
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
const char *buff)
@@ -7823,7 +7784,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
* @hba: per-adapter instance
* @desc_buf: power descriptor buffer to extract ICC levels from.
*
- * Returns calculated ICC level
+ * Return: calculated ICC level.
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
const u8 *desc_buf)
@@ -7932,7 +7893,7 @@ static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
* This function adds scsi device instances for each of all well known LUs
* (except "REPORT LUNS" LU).
*
- * Returns zero on success (all required W-LUs are added successfully),
+ * Return: zero on success (all required W-LUs are added successfully),
* non-zero error value on failure (if failed to add any of the required W-LU).
*/
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
@@ -8122,7 +8083,6 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
u8 model_index;
- u8 b_ufs_feature_sup;
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8151,26 +8111,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
- b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
- if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
- (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
- bool hpb_en = false;
-
- ufshpb_get_dev_info(hba, desc_buf);
-
- if (!ufshpb_is_legacy(hba))
- err = ufshcd_query_flag_retry(hba,
- UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_HPB_EN, 0,
- &hpb_en);
-
- if (ufshpb_is_legacy(hba) || (!err && hpb_en))
- dev_info->hpb_enabled = true;
- }
-
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
@@ -8219,7 +8162,7 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
* RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
* the hibern8 exit latency.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
@@ -8254,7 +8197,7 @@ out:
* TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
* This optimal value can help reduce the hibern8 exit latency.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
@@ -8296,7 +8239,7 @@ out:
* PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
* for such devices.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
@@ -8405,10 +8348,6 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
- if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
- GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
- ufshpb_get_geo_info(hba, desc_buf);
-
out:
kfree(desc_buf);
return err;
@@ -8558,6 +8497,8 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
/**
* ufshcd_add_lus - probe and add UFS logical units
* @hba: per-adapter instance
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_add_lus(struct ufs_hba *hba)
{
@@ -8584,7 +8525,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
}
ufs_bsg_probe(hba);
- ufshpb_init(hba);
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
@@ -8770,6 +8710,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
* @init_dev_params: whether or not to call ufshcd_device_params_init().
*
* Execute link-startup and verify device initialization
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
@@ -8818,7 +8760,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
- ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret)
@@ -8888,10 +8829,6 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
static const struct attribute_group *ufshcd_driver_groups[] = {
&ufs_sysfs_unit_descriptor_group,
&ufs_sysfs_lun_attributes_group,
-#ifdef CONFIG_SCSI_UFS_HPB
- &ufs_sysfs_hpb_stat_group,
- &ufs_sysfs_hpb_param_group,
-#endif
NULL,
};
@@ -9235,8 +9172,9 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
err = ufshcd_vops_init(hba);
if (err)
- dev_err(hba->dev, "%s: variant %s init failed err %d\n",
- __func__, ufshcd_get_var_name(hba), err);
+ dev_err_probe(hba->dev, err,
+ "%s: variant %s init failed with err %d\n",
+ __func__, ufshcd_get_var_name(hba), err);
out:
return err;
}
@@ -9345,8 +9283,8 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev,
* @hba: per adapter instance
* @pwr_mode: device power mode to set
*
- * Returns 0 if requested power mode is set successfully
- * Returns < 0 if failed to set the requested power mode
+ * Return: 0 if requested power mode is set successfully;
+ * < 0 if failed to set the requested power mode.
*/
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
enum ufs_dev_pwr_mode pwr_mode)
@@ -9576,8 +9514,6 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
req_link_state = UIC_LINK_OFF_STATE;
}
- ufshpb_suspend(hba);
-
/*
* If we can't transition into any of the low power modes
* just gate the clocks.
@@ -9731,7 +9667,6 @@ out:
ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
- ufshpb_resume(hba);
}
hba->pm_op_in_progress = false;
return ret;
@@ -9812,7 +9747,6 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
- ufshpb_resume(hba);
goto out;
set_old_link_state:
@@ -9934,6 +9868,8 @@ out:
*
 * This function will disable irqs, turn off clocks
* and set vreg and hba-vreg in lpm mode.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_suspend(struct ufs_hba *hba)
{
@@ -9971,7 +9907,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
* This function basically turns on the regulators, clocks and
* irqs of the hba.
*
- * Returns 0 for success and non-zero for failure
+ * Return: 0 for success and non-zero for failure.
*/
static int ufshcd_resume(struct ufs_hba *hba)
{
@@ -10012,7 +9948,7 @@ out:
* Executed before putting the system into a sleep state in which the contents
* of main memory are preserved.
*
- * Returns 0 for success and non-zero for failure
+ * Return: 0 for success and non-zero for failure.
*/
int ufshcd_system_suspend(struct device *dev)
{
@@ -10039,7 +9975,7 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
* Executed after waking the system up from a sleep state in which the contents
* of main memory were preserved.
*
- * Returns 0 for success and non-zero for failure
+ * Return: 0 for success and non-zero for failure.
*/
int ufshcd_system_resume(struct device *dev)
{
@@ -10069,7 +10005,7 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*
* Check the description of ufshcd_suspend() function for more details.
*
- * Returns 0 for success and non-zero for failure
+ * Return: 0 for success and non-zero for failure.
*/
int ufshcd_runtime_suspend(struct device *dev)
{
@@ -10095,6 +10031,8 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*
* 1. Turn on all the controller related clocks
* 2. Turn ON VCC rail
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_runtime_resume(struct device *dev)
{
@@ -10152,7 +10090,6 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_rpm_get_sync(hba);
ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
- ufshpb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
@@ -10230,7 +10167,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
* addressing capability
* @hba: per adapter instance
*
- * Returns 0 for success, non-zero for failure
+ * Return: 0 for success, non-zero for failure.
*/
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
@@ -10245,7 +10182,8 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
- * Returns 0 on success, non-zero value on failure
+ *
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
@@ -10301,7 +10239,8 @@ static const struct blk_mq_ops ufshcd_tmf_ops = {
* @hba: per-adapter instance
* @mmio_base: base register address
* @irq: Interrupt line of device
- * Returns 0 on success, non-zero value on failure
+ *
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
@@ -10632,6 +10571,53 @@ static const struct dev_pm_ops ufshcd_wl_pm_ops = {
SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};
+static void ufshcd_check_header_layout(void)
+{
+ /*
+ * gcc compilers before version 10 cannot do constant-folding for
+ * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
+ * before.
+ */
+ if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
+ return;
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .cci = 3})[0] != 3);
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .ehs_length = 2})[1] != 2);
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .enable_crypto = 1})[2]
+ != 0x80);
+
+ BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
+ .command_type = 5,
+ .data_direction = 3,
+ .interrupt = 1,
+ })[3]) != ((5 << 4) | (3 << 1) | 1));
+
+ BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
+ .dunl = cpu_to_le32(0xdeadbeef)})[1] !=
+ cpu_to_le32(0xdeadbeef));
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .ocs = 4})[8] != 4);
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .cds = 5})[9] != 5);
+
+ BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
+ .dunu = cpu_to_le32(0xbadcafe)})[3] !=
+ cpu_to_le32(0xbadcafe));
+
+ BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
+ .iid = 0xf })[4] != 0xf0);
+
+ BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
+ .command_set_type = 0xf })[4] != 0xf);
+}
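+
/*
 * Editor's sketch (hypothetical struct, not part of the patch) of the
 * compound-literal + BUILD_BUG_ON() technique used above: each check sets a
 * single bitfield and verifies, byte by byte, that it lands at the expected
 * offset with the expected bit position. Assumes a little-endian bitfield
 * layout and a compiler that constant-folds sub-byte bitfields (gcc >= 10 or
 * clang); <linux/build_bug.h> is assumed to be available.
 */
struct example_hdr {
	u8 low : 4;
	u8 high : 4;
	u8 second_byte;
};

static void example_check_layout(void)
{
	/* .high occupies the upper nibble of byte 0 on little-endian */
	BUILD_BUG_ON(((u8 *)&(struct example_hdr){ .high = 0xf })[0] != 0xf0);
	/* .second_byte is the whole of byte 1 */
	BUILD_BUG_ON(((u8 *)&(struct example_hdr){ .second_byte = 5 })[1] != 5);
}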
+
/*
* ufs_dev_wlun_template - describes ufs device wlun
* ufs-device wlun - used to send pm commands
@@ -10657,6 +10643,8 @@ static int __init ufshcd_core_init(void)
{
int ret;
+ ufshcd_check_header_layout();
+
ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
deleted file mode 100644
index 255f8b38d0c2..000000000000
--- a/drivers/ufs/core/ufshpb.c
+++ /dev/null
@@ -1,2668 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Universal Flash Storage Host Performance Booster
- *
- * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * Yongmyung Lee <ymhungry.lee@samsung.com>
- * Jinyoung Choi <j-young.choi@samsung.com>
- */
-
-#include <asm/unaligned.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <scsi/scsi_cmnd.h>
-
-#include "ufshcd-priv.h"
-#include "ufshpb.h"
-#include "../../scsi/sd.h"
-
-#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
-#define READ_TO_MS 1000
-#define READ_TO_EXPIRIES 100
-#define POLLING_INTERVAL_MS 200
-#define THROTTLE_MAP_REQ_DEFAULT 1
-
-/* memory management */
-static struct kmem_cache *ufshpb_mctx_cache;
-static mempool_t *ufshpb_mctx_pool;
-static mempool_t *ufshpb_page_pool;
-/* A cache size of 2MB can cache ppn in the 1GB range. */
-static unsigned int ufshpb_host_map_kbytes = SZ_2K;
-static int tot_active_srgn_pages;
-
-static struct workqueue_struct *ufshpb_wq;
-
-static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx);
-
-bool ufshpb_is_allowed(struct ufs_hba *hba)
-{
- return !(hba->ufshpb_dev.hpb_disabled);
-}
-
-/* HPB version 1.0 is called as legacy version. */
-bool ufshpb_is_legacy(struct ufs_hba *hba)
-{
- return hba->ufshpb_dev.is_legacy;
-}
-
-static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
-{
- return sdev->hostdata;
-}
-
-static int ufshpb_get_state(struct ufshpb_lu *hpb)
-{
- return atomic_read(&hpb->hpb_state);
-}
-
-static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
-{
- atomic_set(&hpb->hpb_state, state);
-}
-
-static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
- struct ufshpb_subregion *srgn)
-{
- return rgn->rgn_state != HPB_RGN_INACTIVE &&
- srgn->srgn_state == HPB_SRGN_VALID;
-}
-
-static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
-{
- return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
-}
-
-static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
-{
- return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
- op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
-}
-
-static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
-{
- return transfer_len <= hpb->pre_req_max_tr_len;
-}
-
-static bool ufshpb_is_general_lun(int lun)
-{
- return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
-}
-
-static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
-{
- return hpb->lu_pinned_end != PINNED_NOT_SET &&
- rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end;
-}
-
-static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
-{
- bool ret = false;
- unsigned long flags;
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT)
- return;
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
- ret = true;
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-
- if (ret)
- queue_work(ufshpb_wq, &hpb->map_work);
-}
-
-static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp,
- struct utp_hpb_rsp *rsp_field)
-{
- /* Check HPB_UPDATE_ALERT */
- if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
- UPIU_HEADER_DWORD(0, 2, 0, 0)))
- return false;
-
- if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
- rsp_field->desc_type != DEV_DES_TYPE ||
- rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
- rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
- rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
- rsp_field->hpb_op == HPB_RSP_NONE ||
- (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
- !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
- return false;
-
- if (!ufshpb_is_general_lun(rsp_field->lun)) {
- dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
- lrbp->lun);
- return false;
- }
-
- return true;
-}
-
-static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
- int srgn_offset, int cnt, bool set_dirty)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn, *prev_srgn = NULL;
- int set_bit_len;
- int bitmap_len;
- unsigned long flags;
-
-next_srgn:
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- if (likely(!srgn->is_last))
- bitmap_len = hpb->entries_per_srgn;
- else
- bitmap_len = hpb->last_srgn_entries;
-
- if ((srgn_offset + cnt) > bitmap_len)
- set_bit_len = bitmap_len - srgn_offset;
- else
- set_bit_len = cnt;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (rgn->rgn_state != HPB_RGN_INACTIVE) {
- if (set_dirty) {
- if (srgn->srgn_state == HPB_SRGN_VALID)
- bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
- set_bit_len);
- } else if (hpb->is_hcm) {
- /* rewind the read timer for lru regions */
- rgn->read_timeout = ktime_add_ms(ktime_get(),
- rgn->hpb->params.read_timeout_ms);
- rgn->read_timeout_expiries =
- rgn->hpb->params.read_timeout_expiries;
- }
- }
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- if (hpb->is_hcm && prev_srgn != srgn) {
- bool activate = false;
-
- spin_lock(&rgn->rgn_lock);
- if (set_dirty) {
- rgn->reads -= srgn->reads;
- srgn->reads = 0;
- set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
- } else {
- srgn->reads++;
- rgn->reads++;
- if (srgn->reads == hpb->params.activation_thld)
- activate = true;
- }
- spin_unlock(&rgn->rgn_lock);
-
- if (activate ||
- test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "activate region %d-%d\n", rgn_idx, srgn_idx);
- }
-
- prev_srgn = srgn;
- }
-
- srgn_offset = 0;
- if (++srgn_idx == hpb->srgns_per_rgn) {
- srgn_idx = 0;
- rgn_idx++;
- }
-
- cnt -= set_bit_len;
- if (cnt > 0)
- goto next_srgn;
-}
-
-static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx, int srgn_offset, int cnt)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- int bitmap_len;
- int bit_len;
-
-next_srgn:
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- if (!ufshpb_is_valid_srgn(rgn, srgn))
- return true;
-
- /*
- * If the region state is active, mctx must be allocated.
- * In this case, check whether the region is evicted or
- * mctx allocation fail.
- */
- if (unlikely(!srgn->mctx)) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "no mctx in region %d subregion %d.\n",
- srgn->rgn_idx, srgn->srgn_idx);
- return true;
- }
-
- if (likely(!srgn->is_last))
- bitmap_len = hpb->entries_per_srgn;
- else
- bitmap_len = hpb->last_srgn_entries;
-
- if ((srgn_offset + cnt) > bitmap_len)
- bit_len = bitmap_len - srgn_offset;
- else
- bit_len = cnt;
-
- if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
- srgn_offset) < bit_len + srgn_offset)
- return true;
-
- srgn_offset = 0;
- if (++srgn_idx == hpb->srgns_per_rgn) {
- srgn_idx = 0;
- rgn_idx++;
- }
-
- cnt -= bit_len;
- if (cnt > 0)
- goto next_srgn;
-
- return false;
-}
-
-static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
-{
- return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
-}
-
-static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
- struct ufshpb_map_ctx *mctx, int pos,
- int len, __be64 *ppn_buf)
-{
- struct page *page;
- int index, offset;
- int copied;
-
- index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
- offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
-
- if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
- copied = len;
- else
- copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
-
- page = mctx->m_page[index];
- if (unlikely(!page)) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "error. cannot find page in mctx\n");
- return -ENOMEM;
- }
-
- memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
- copied * HPB_ENTRY_SIZE);
-
- return copied;
-}
-
-static void
-ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
- int *srgn_idx, int *offset)
-{
- int rgn_offset;
-
- *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
- rgn_offset = lpn & hpb->entries_per_rgn_mask;
- *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
- *offset = rgn_offset & hpb->entries_per_srgn_mask;
-}
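/*
 * Editor's worked example (illustrative values, not part of the patch) for
 * the LPN decomposition performed by the removed helper above, assuming
 * entries_per_rgn_shift = 16 (mask 0xffff) and entries_per_srgn_shift = 12
 * (mask 0xfff):
 *
 *   lpn = 0x12345
 *   rgn_idx    = 0x12345 >> 16    = 1
 *   rgn_offset = 0x12345 & 0xffff = 0x2345
 *   srgn_idx   = 0x2345 >> 12     = 2
 *   offset     = 0x2345 & 0xfff   = 0x345
 */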
-
-static void
-ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- __be64 ppn, u8 transfer_len)
-{
- unsigned char *cdb = lrbp->cmd->cmnd;
- __be64 ppn_tmp = ppn;
- cdb[0] = UFSHPB_READ;
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
- ppn_tmp = (__force __be64)swab64((__force u64)ppn);
-
- /* ppn value is stored as big-endian in the host memory */
- memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
- cdb[14] = transfer_len;
- cdb[15] = 0;
-
- lrbp->cmd->cmd_len = UFS_CDB_SIZE;
-}
-
-/*
- * This function will set up HPB read command using host-side L2P map data.
- */
-int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufshpb_lu *hpb;
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- struct scsi_cmnd *cmd = lrbp->cmd;
- u32 lpn;
- __be64 ppn;
- unsigned long flags;
- int transfer_len, rgn_idx, srgn_idx, srgn_offset;
- int err = 0;
-
- hpb = ufshpb_get_hpb_data(cmd->device);
- if (!hpb)
- return -ENODEV;
-
- if (ufshpb_get_state(hpb) == HPB_INIT)
- return -ENODEV;
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT", __func__);
- return -ENODEV;
- }
-
- if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
- (!ufshpb_is_write_or_discard(cmd) &&
- !ufshpb_is_read_cmd(cmd)))
- return 0;
-
- transfer_len = sectors_to_logical(cmd->device,
- blk_rq_sectors(scsi_cmd_to_rq(cmd)));
- if (unlikely(!transfer_len))
- return 0;
-
- lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
- ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- /* If command type is WRITE or DISCARD, set bitmap as dirty */
- if (ufshpb_is_write_or_discard(cmd)) {
- ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len, true);
- return 0;
- }
-
- if (!ufshpb_is_supported_chunk(hpb, transfer_len))
- return 0;
-
- if (hpb->is_hcm) {
- /*
- * in host control mode, reads are the main source for
- * activation trials.
- */
- ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len, false);
-
- /* keep those counters normalized */
- if (rgn->reads > hpb->entries_per_srgn)
- schedule_work(&hpb->ufshpb_normalization_work);
- }
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len)) {
- hpb->stats.miss_cnt++;
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return 0;
- }
-
- err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- if (unlikely(err < 0)) {
- /*
- * In this case, the region state is active,
- * but the ppn table is not allocated.
- * Make sure that ppn table must be allocated on
- * active state.
- */
- dev_err(hba->dev, "get ppn failed. err %d\n", err);
- return err;
- }
-
- ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
-
- hpb->stats.hit_cnt++;
- return 0;
-}
-
-static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, int rgn_idx,
- enum req_op op, bool atomic)
-{
- struct ufshpb_req *rq;
- struct request *req;
- int retries = HPB_MAP_REQ_RETRIES;
-
- rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
- if (!rq)
- return NULL;
-
-retry:
- req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, op,
- BLK_MQ_REQ_NOWAIT);
-
- if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
- usleep_range(3000, 3100);
- goto retry;
- }
-
- if (IS_ERR(req))
- goto free_rq;
-
- rq->hpb = hpb;
- rq->req = req;
- rq->rb.rgn_idx = rgn_idx;
-
- return rq;
-
-free_rq:
- kmem_cache_free(hpb->map_req_cache, rq);
- return NULL;
-}
-
-static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
-{
- blk_mq_free_request(rq->req);
- kmem_cache_free(hpb->map_req_cache, rq);
-}
-
-static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_req *map_req;
- struct bio *bio;
- unsigned long flags;
-
- if (hpb->is_hcm &&
- hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
- dev_info(&hpb->sdev_ufs_lu->sdev_dev,
- "map_req throttle. inflight %d throttle %d",
- hpb->num_inflight_map_req,
- hpb->params.inflight_map_req);
- return NULL;
- }
-
- map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
- if (!map_req)
- return NULL;
-
- bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
- if (!bio) {
- ufshpb_put_req(hpb, map_req);
- return NULL;
- }
-
- map_req->bio = bio;
-
- map_req->rb.srgn_idx = srgn->srgn_idx;
- map_req->rb.mctx = srgn->mctx;
-
- spin_lock_irqsave(&hpb->param_lock, flags);
- hpb->num_inflight_map_req++;
- spin_unlock_irqrestore(&hpb->param_lock, flags);
-
- return map_req;
-}
-
-static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *map_req)
-{
- unsigned long flags;
-
- bio_put(map_req->bio);
- ufshpb_put_req(hpb, map_req);
-
- spin_lock_irqsave(&hpb->param_lock, flags);
- hpb->num_inflight_map_req--;
- spin_unlock_irqrestore(&hpb->param_lock, flags);
-}
-
-static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_region *rgn;
- u32 num_entries = hpb->entries_per_srgn;
-
- if (!srgn->mctx) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "no mctx in region %d subregion %d.\n",
- srgn->rgn_idx, srgn->srgn_idx);
- return -1;
- }
-
- if (unlikely(srgn->is_last))
- num_entries = hpb->last_srgn_entries;
-
- bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
-
- rgn = hpb->rgn_tbl + srgn->rgn_idx;
- clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
-
- return 0;
-}
-
-static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
-
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- list_del_init(&rgn->list_inact_rgn);
-
- if (list_empty(&srgn->list_act_srgn))
- list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
-
- hpb->stats.rcmd_active_cnt++;
-}
-
-static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- rgn = hpb->rgn_tbl + rgn_idx;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- list_del_init(&srgn->list_act_srgn);
-
- if (list_empty(&rgn->list_inact_rgn))
- list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
-
- hpb->stats.rcmd_inactive_cnt++;
-}
-
-static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_region *rgn;
-
- /*
- * If there is no mctx in subregion
- * after I/O progress for HPB_READ_BUFFER, the region to which the
- * subregion belongs was evicted.
- * Make sure the region must not evict in I/O progress
- */
- if (!srgn->mctx) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "no mctx in region %d subregion %d.\n",
- srgn->rgn_idx, srgn->srgn_idx);
- srgn->srgn_state = HPB_SRGN_INVALID;
- return;
- }
-
- rgn = hpb->rgn_tbl + srgn->rgn_idx;
-
- if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "region %d subregion %d evicted\n",
- srgn->rgn_idx, srgn->srgn_idx);
- srgn->srgn_state = HPB_SRGN_INVALID;
- return;
- }
- srgn->srgn_state = HPB_SRGN_VALID;
-}
-
-static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
- blk_status_t error)
-{
- struct ufshpb_req *umap_req = req->end_io_data;
-
- ufshpb_put_req(umap_req->hpb, umap_req);
- return RQ_END_IO_NONE;
-}
-
-static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
- blk_status_t error)
-{
- struct ufshpb_req *map_req = req->end_io_data;
- struct ufshpb_lu *hpb = map_req->hpb;
- struct ufshpb_subregion *srgn;
- unsigned long flags;
-
- srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
- map_req->rb.srgn_idx;
-
- ufshpb_clear_dirty_bitmap(hpb, srgn);
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- ufshpb_activate_subregion(hpb, srgn);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- ufshpb_put_map_req(map_req->hpb, map_req);
- return RQ_END_IO_NONE;
-}
-
-static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
-{
- cdb[0] = UFSHPB_WRITE_BUFFER;
- cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
- UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
- if (rgn)
- put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
- cdb[9] = 0x00;
-}
-
-static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
- int srgn_idx, int srgn_mem_size)
-{
- cdb[0] = UFSHPB_READ_BUFFER;
- cdb[1] = UFSHPB_READ_BUFFER_ID;
-
- put_unaligned_be16(rgn_idx, &cdb[2]);
- put_unaligned_be16(srgn_idx, &cdb[4]);
- put_unaligned_be24(srgn_mem_size, &cdb[6]);
-
- cdb[9] = 0x00;
-}
-
-static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *umap_req,
- struct ufshpb_region *rgn)
-{
- struct request *req = umap_req->req;
- struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
-
- req->timeout = 0;
- req->end_io_data = umap_req;
- req->end_io = ufshpb_umap_req_compl_fn;
-
- ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
- scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
-
- blk_execute_rq_nowait(req, true);
-
- hpb->stats.umap_req_cnt++;
-}
-
-static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *map_req, bool last)
-{
- struct request_queue *q;
- struct request *req;
- struct scsi_cmnd *scmd;
- int mem_size = hpb->srgn_mem_size;
- int ret = 0;
- int i;
-
- q = hpb->sdev_ufs_lu->request_queue;
- for (i = 0; i < hpb->pages_per_srgn; i++) {
- ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
- PAGE_SIZE, 0);
- if (ret != PAGE_SIZE) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "bio_add_pc_page fail %d - %d\n",
- map_req->rb.rgn_idx, map_req->rb.srgn_idx);
- return ret;
- }
- }
-
- req = map_req->req;
-
- blk_rq_append_bio(req, map_req->bio);
-
- req->end_io_data = map_req;
- req->end_io = ufshpb_map_req_compl_fn;
-
- if (unlikely(last))
- mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
-
- scmd = blk_mq_rq_to_pdu(req);
- ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
- map_req->rb.srgn_idx, mem_size);
- scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
-
- blk_execute_rq_nowait(req, true);
-
- hpb->stats.map_req_cnt++;
- return 0;
-}
-
-static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
- bool last)
-{
- struct ufshpb_map_ctx *mctx;
- u32 num_entries = hpb->entries_per_srgn;
- int i, j;
-
- mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
- if (!mctx)
- return NULL;
-
- mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
- if (!mctx->m_page)
- goto release_mctx;
-
- if (unlikely(last))
- num_entries = hpb->last_srgn_entries;
-
- mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
- if (!mctx->ppn_dirty)
- goto release_m_page;
-
- for (i = 0; i < hpb->pages_per_srgn; i++) {
- mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
- if (!mctx->m_page[i]) {
- for (j = 0; j < i; j++)
- mempool_free(mctx->m_page[j], ufshpb_page_pool);
- goto release_ppn_dirty;
- }
- clear_page(page_address(mctx->m_page[i]));
- }
-
- return mctx;
-
-release_ppn_dirty:
- bitmap_free(mctx->ppn_dirty);
-release_m_page:
- kmem_cache_free(hpb->m_page_cache, mctx->m_page);
-release_mctx:
- mempool_free(mctx, ufshpb_mctx_pool);
- return NULL;
-}
-
-static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
- struct ufshpb_map_ctx *mctx)
-{
- int i;
-
- for (i = 0; i < hpb->pages_per_srgn; i++)
- mempool_free(mctx->m_page[i], ufshpb_page_pool);
-
- bitmap_free(mctx->ppn_dirty);
- kmem_cache_free(hpb->m_page_cache, mctx->m_page);
- mempool_free(mctx, ufshpb_mctx_pool);
-}
-
-static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- if (srgn->srgn_state == HPB_SRGN_ISSUED)
- return -EPERM;
-
- return 0;
-}
-
-static void ufshpb_read_to_handler(struct work_struct *work)
-{
- struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
- ufshpb_read_to_work.work);
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn, *next_rgn;
- unsigned long flags;
- unsigned int poll;
- LIST_HEAD(expired_list);
-
- if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
- return;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
- list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
- list_lru_rgn) {
- bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
-
- if (timedout) {
- rgn->read_timeout_expiries--;
- if (is_rgn_dirty(rgn) ||
- rgn->read_timeout_expiries == 0)
- list_add(&rgn->list_expired_rgn, &expired_list);
- else
- rgn->read_timeout = ktime_add_ms(ktime_get(),
- hpb->params.read_timeout_ms);
- }
- }
-
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- list_for_each_entry_safe(rgn, next_rgn, &expired_list,
- list_expired_rgn) {
- list_del_init(&rgn->list_expired_rgn);
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- }
-
- ufshpb_kick_map_work(hpb);
-
- clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
-
- poll = hpb->params.timeout_polling_interval_ms;
- schedule_delayed_work(&hpb->ufshpb_read_to_work,
- msecs_to_jiffies(poll));
-}
-
-static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
- struct ufshpb_region *rgn)
-{
- rgn->rgn_state = HPB_RGN_ACTIVE;
- list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
- atomic_inc(&lru_info->active_cnt);
- if (rgn->hpb->is_hcm) {
- rgn->read_timeout =
- ktime_add_ms(ktime_get(),
- rgn->hpb->params.read_timeout_ms);
- rgn->read_timeout_expiries =
- rgn->hpb->params.read_timeout_expiries;
- }
-}
-
-static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
- struct ufshpb_region *rgn)
-{
- list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
-}
-
-static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
-{
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn, *victim_rgn = NULL;
-
- list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
- if (ufshpb_check_srgns_issue_state(hpb, rgn))
- continue;
-
- /*
- * in host control mode, verify that the exiting region
- * has fewer reads
- */
- if (hpb->is_hcm &&
- rgn->reads > hpb->params.eviction_thld_exit)
- continue;
-
- victim_rgn = rgn;
- break;
- }
-
- if (!victim_rgn)
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: no region allocated\n",
- __func__);
-
- return victim_rgn;
-}
-
-static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
- struct ufshpb_region *rgn)
-{
- list_del_init(&rgn->list_lru_rgn);
- rgn->rgn_state = HPB_RGN_INACTIVE;
- atomic_dec(&lru_info->active_cnt);
-}
-
-static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- if (srgn->srgn_state != HPB_SRGN_UNUSED) {
- ufshpb_put_map_ctx(hpb, srgn->mctx);
- srgn->srgn_state = HPB_SRGN_UNUSED;
- srgn->mctx = NULL;
- }
-}
-
-static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- bool atomic)
-{
- struct ufshpb_req *umap_req;
- int rgn_idx = rgn ? rgn->rgn_idx : 0;
-
- umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
- if (!umap_req)
- return -ENOMEM;
-
- ufshpb_execute_umap_req(hpb, umap_req, rgn);
-
- return 0;
-}
-
-static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- return ufshpb_issue_umap_req(hpb, rgn, true);
-}
-
-static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- struct victim_select_info *lru_info;
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- lru_info = &hpb->lru_info;
-
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
-
- ufshpb_cleanup_lru_info(lru_info, rgn);
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- ufshpb_purge_active_subregion(hpb, srgn);
-}
-
-static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (rgn->rgn_state == HPB_RGN_PINNED) {
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
- "pinned region cannot drop-out. region %d\n",
- rgn->rgn_idx);
- goto out;
- }
-
- if (!list_empty(&rgn->list_lru_rgn)) {
- if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (hpb->is_hcm) {
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- ret = ufshpb_issue_umap_single_req(hpb, rgn);
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (ret)
- goto out;
- }
-
- __ufshpb_evict_region(hpb, rgn);
- }
-out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return ret;
-}
-
-static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_req *map_req;
- unsigned long flags;
- int ret;
- int err = -EAGAIN;
- bool alloc_required = false;
- enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT\n", __func__);
- goto unlock_out;
- }
-
- if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
- (srgn->srgn_state == HPB_SRGN_INVALID)) {
- err = 0;
- goto unlock_out;
- }
-
- if (srgn->srgn_state == HPB_SRGN_UNUSED)
- alloc_required = true;
-
- /*
- * If the subregion is already ISSUED state,
- * a specific event (e.g., GC or wear-leveling, etc.) occurs in
- * the device and HPB response for map loading is received.
- * In this case, after finishing the HPB_READ_BUFFER,
- * the next HPB_READ_BUFFER is performed again to obtain the latest
- * map data.
- */
- if (srgn->srgn_state == HPB_SRGN_ISSUED)
- goto unlock_out;
-
- srgn->srgn_state = HPB_SRGN_ISSUED;
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- if (alloc_required) {
- srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
- if (!srgn->mctx) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "get map_ctx failed. region %d - %d\n",
- rgn->rgn_idx, srgn->srgn_idx);
- state = HPB_SRGN_UNUSED;
- goto change_srgn_state;
- }
- }
-
- map_req = ufshpb_get_map_req(hpb, srgn);
- if (!map_req)
- goto change_srgn_state;
-
-
- ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
- if (ret) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: issue map_req failed: %d, region %d - %d\n",
- __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
- goto free_map_req;
- }
- return 0;
-
-free_map_req:
- ufshpb_put_map_req(hpb, map_req);
-change_srgn_state:
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- srgn->srgn_state = state;
-unlock_out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return err;
-}
-
-static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
-{
- struct ufshpb_region *victim_rgn = NULL;
- struct victim_select_info *lru_info = &hpb->lru_info;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- /*
- * If region belongs to lru_list, just move the region
- * to the front of lru list because the state of the region
- * is already active-state.
- */
- if (!list_empty(&rgn->list_lru_rgn)) {
- ufshpb_hit_lru_info(lru_info, rgn);
- goto out;
- }
-
- if (rgn->rgn_state == HPB_RGN_INACTIVE) {
- if (atomic_read(&lru_info->active_cnt) ==
- lru_info->max_lru_active_cnt) {
- /*
- * If the maximum number of active regions
- * is exceeded, evict the least recently used region.
- * This case may occur when the device responds
- * to the eviction information late.
- * It is okay to evict the least recently used region,
- * because the device could detect this region
- * by not issuing HPB_READ
- *
- * in host control mode, verify that the entering
- * region has enough reads
- */
- if (hpb->is_hcm &&
- rgn->reads < hpb->params.eviction_thld_enter) {
- ret = -EACCES;
- goto out;
- }
-
- victim_rgn = ufshpb_victim_lru_info(hpb);
- if (!victim_rgn) {
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
- "cannot get victim region %s\n",
- hpb->is_hcm ? "" : "error");
- ret = -ENOMEM;
- goto out;
- }
-
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "LRU full (%d), choose victim %d\n",
- atomic_read(&lru_info->active_cnt),
- victim_rgn->rgn_idx);
-
- if (hpb->is_hcm) {
- spin_unlock_irqrestore(&hpb->rgn_state_lock,
- flags);
- ret = ufshpb_issue_umap_single_req(hpb,
- victim_rgn);
- spin_lock_irqsave(&hpb->rgn_state_lock,
- flags);
- if (ret)
- goto out;
- }
-
- __ufshpb_evict_region(hpb, victim_rgn);
- }
-
- /*
- * When a region is added to lru_info list_head,
- * it is guaranteed that the subregion has been
- * assigned all mctx. If failed, try to receive mctx again
- * without being added to lru_info list_head
- */
- ufshpb_add_lru_info(lru_info, rgn);
- }
-out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return ret;
-}
-/**
- *ufshpb_submit_region_inactive() - submit a region to be inactivated later
- *@hpb: per-LU HPB instance
- *@region_index: the index associated with the region that will be inactivated later
- */
-static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index)
-{
- int subregion_index;
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
-
- /*
- * Remove this region from active region list and add it to inactive list
- */
- spin_lock(&hpb->rsp_list_lock);
- ufshpb_update_inactive_info(hpb, region_index);
- spin_unlock(&hpb->rsp_list_lock);
-
- rgn = hpb->rgn_tbl + region_index;
-
- /*
- * Set subregion state to be HPB_SRGN_INVALID, there will no HPB read on this subregion
- */
- spin_lock(&hpb->rgn_state_lock);
- if (rgn->rgn_state != HPB_RGN_INACTIVE) {
- for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) {
- srgn = rgn->srgn_tbl + subregion_index;
- if (srgn->srgn_state == HPB_SRGN_VALID)
- srgn->srgn_state = HPB_SRGN_INVALID;
- }
- }
- spin_unlock(&hpb->rgn_state_lock);
-}
-
-static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
- struct utp_hpb_rsp *rsp_field)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- int i, rgn_i, srgn_i;
-
- BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
-	/*
-	 * If the active region and the inactive region are the same,
-	 * we will inactivate this region.
-	 * The device can check this (region inactivated) and
-	 * will respond with the proper active region information.
-	 */
- for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
- rgn_i =
- be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
- srgn_i =
- be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
-
- rgn = hpb->rgn_tbl + rgn_i;
- if (hpb->is_hcm &&
- (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
-			/*
-			 * In host control mode, subregion activation
-			 * recommendations are only allowed for active regions.
-			 * Also, ignore recommendations for dirty regions - the
-			 * host makes decisions concerning those on its own.
-			 */
- continue;
- }
-
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
-
- spin_lock(&hpb->rsp_list_lock);
- ufshpb_update_active_info(hpb, rgn_i, srgn_i);
- spin_unlock(&hpb->rsp_list_lock);
-
- srgn = rgn->srgn_tbl + srgn_i;
-
- /* blocking HPB_READ */
- spin_lock(&hpb->rgn_state_lock);
- if (srgn->srgn_state == HPB_SRGN_VALID)
- srgn->srgn_state = HPB_SRGN_INVALID;
- spin_unlock(&hpb->rgn_state_lock);
- }
-
- if (hpb->is_hcm) {
- /*
- * in host control mode the device is not allowed to inactivate
- * regions
- */
- goto out;
- }
-
- for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
- rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i);
- ufshpb_submit_region_inactive(hpb, rgn_i);
- }
-
-out:
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
- rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
-
- if (ufshpb_get_state(hpb) == HPB_PRESENT)
- queue_work(ufshpb_wq, &hpb->map_work);
-}
-
-/*
- * Set the flags of all active regions to RGN_FLAG_UPDATE so the host
- * reloads their L2P entries later.
- */
-static void ufshpb_set_regions_update(struct ufshpb_lu *hpb)
-{
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn;
- unsigned long flags;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
- list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
- set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
-
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-}
-
-static void ufshpb_dev_reset_handler(struct ufs_hba *hba)
-{
- struct scsi_device *sdev;
- struct ufshpb_lu *hpb;
-
- __shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb)
- continue;
-
- if (hpb->is_hcm) {
-			/*
-			 * In HPB host control mode, if the device powered up
-			 * and lost its HPB information, set the region flag to
-			 * RGN_FLAG_UPDATE so that the host reloads its L2P
-			 * entries (reactivates the region in the UFS device).
-			 */
- ufshpb_set_regions_update(hpb);
- } else {
-			/*
-			 * In HPB device control mode, receiving 02h (HPB
-			 * Operation) in the UPIU response means the device
-			 * recommends that the host inactivate all active
-			 * regions. Add all active regions to the inactive
-			 * list; they will be inactivated later in
-			 * ufshpb_map_work_handler().
-			 */
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn;
-
- list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
- ufshpb_submit_region_inactive(hpb, rgn->rgn_idx);
-
- if (ufshpb_get_state(hpb) == HPB_PRESENT)
- queue_work(ufshpb_wq, &hpb->map_work);
- }
- }
-}
-
-/*
- * This function parses the recommended active subregion information in the
- * sense data field of a response UPIU with SAM_STAT_GOOD status.
- */
-void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
- struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
- int data_seg_len;
-
- data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
- & MASK_RSP_UPIU_DATA_SEG_LEN;
-
- /* If data segment length is zero, rsp_field is not valid */
- if (!data_seg_len)
- return;
-
- if (unlikely(lrbp->lun != rsp_field->lun)) {
- struct scsi_device *sdev;
- bool found = false;
-
- __shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
-
- if (!hpb)
- continue;
-
- if (rsp_field->lun == hpb->lun) {
- found = true;
- break;
- }
- }
-
- if (!found)
- return;
- }
-
- if (!hpb)
- return;
-
- if (ufshpb_get_state(hpb) == HPB_INIT)
- return;
-
- if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
- (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT/SUSPEND\n",
- __func__);
- return;
- }
-
- BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
-
- if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
- return;
-
- hpb->stats.rcmd_noti_cnt++;
-
- switch (rsp_field->hpb_op) {
- case HPB_RSP_REQ_REGION_UPDATE:
- if (data_seg_len != DEV_DATA_SEG_LEN)
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
-				 "%s: data segment length does not match.\n",
- __func__);
- ufshpb_rsp_req_region_update(hpb, rsp_field);
- break;
- case HPB_RSP_DEV_RESET:
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
- "UFS device lost HPB information during PM.\n");
- ufshpb_dev_reset_handler(hba);
-
- break;
- default:
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "hpb_op is not available: %d\n",
- rsp_field->hpb_op);
- break;
- }
-}
-
-static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- struct ufshpb_subregion *srgn)
-{
- if (!list_empty(&rgn->list_inact_rgn))
- return;
-
- if (!list_empty(&srgn->list_act_srgn)) {
- list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
- return;
- }
-
- list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
-}
-
-static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- struct list_head *pending_list)
-{
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- if (!list_empty(&rgn->list_inact_rgn))
- return;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- if (!list_empty(&srgn->list_act_srgn))
- return;
-
- list_add_tail(&rgn->list_inact_rgn, pending_list);
-}
-
-static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
- struct ufshpb_subregion,
- list_act_srgn))) {
- if (ufshpb_get_state(hpb) == HPB_SUSPEND)
- break;
-
- list_del_init(&srgn->list_act_srgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-
- rgn = hpb->rgn_tbl + srgn->rgn_idx;
- ret = ufshpb_add_region(hpb, rgn);
- if (ret)
- goto active_failed;
-
- ret = ufshpb_issue_map_req(hpb, rgn, srgn);
- if (ret) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "issue map_req failed. ret %d, region %d - %d\n",
- ret, rgn->rgn_idx, srgn->srgn_idx);
- goto active_failed;
- }
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- }
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- return;
-
-active_failed:
- dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
- rgn->rgn_idx, srgn->srgn_idx);
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_add_active_list(hpb, rgn, srgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-}
-
-static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn;
- unsigned long flags;
- int ret;
- LIST_HEAD(pending_list);
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
- struct ufshpb_region,
- list_inact_rgn))) {
- if (ufshpb_get_state(hpb) == HPB_SUSPEND)
- break;
-
- list_del_init(&rgn->list_inact_rgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-
- ret = ufshpb_evict_region(hpb, rgn);
- if (ret) {
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- }
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- }
-
- list_splice(&pending_list, &hpb->lh_inact_rgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-}
-
-static void ufshpb_normalization_work_handler(struct work_struct *work)
-{
- struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
- ufshpb_normalization_work);
- int rgn_idx;
- u8 factor = hpb->params.normalization_factor;
-
- for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
- struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
- int srgn_idx;
-
- spin_lock(&rgn->rgn_lock);
- rgn->reads = 0;
- for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
- struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
-
- srgn->reads >>= factor;
- rgn->reads += srgn->reads;
- }
- spin_unlock(&rgn->rgn_lock);
-
- if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
- continue;
-
- /* if region is active but has no reads - inactivate it */
- spin_lock(&hpb->rsp_list_lock);
- ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
- spin_unlock(&hpb->rsp_list_lock);
- }
-}
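As a rough illustration of what the normalization pass above does to the read counters: every pass shifts each subregion count right by normalization_factor, so a region that stops being read decays toward zero and, once its summed count reaches zero, is queued for inactivation. The default factor set by ufshpb_hcm_param_init() is 1; the sketch below uses 2 only so the decay is visible in a few passes:

#include <stdio.h>

int main(void)
{
	unsigned int reads = 1000;	/* hypothetical subregion read count */
	unsigned int factor = 2;	/* illustrative normalization_factor */
	int pass;

	for (pass = 1; pass <= 5; pass++) {
		reads >>= factor;	/* same decay as srgn->reads >>= factor */
		printf("after pass %d: %u reads\n", pass, reads);
	}
	/* 1000 -> 250 -> 62 -> 15 -> 3 -> 0: once the summed region count
	 * reaches zero, the region becomes a candidate for inactivation. */
	return 0;
}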
-
-static void ufshpb_map_work_handler(struct work_struct *work)
-{
- struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT\n", __func__);
- return;
- }
-
- ufshpb_run_inactive_region_list(hpb);
- ufshpb_run_active_subregion_list(hpb);
-}
-
-/*
- * This function does not need to hold any locks (rgn_state_lock,
- * rsp_list_lock, etc.) because it is only called during initialization.
- */
-static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
- struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- struct ufshpb_subregion *srgn;
- int srgn_idx, i;
- int err = 0;
-
- for_each_sub_region(rgn, srgn_idx, srgn) {
- srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
- srgn->srgn_state = HPB_SRGN_INVALID;
- if (!srgn->mctx) {
- err = -ENOMEM;
- dev_err(hba->dev,
- "alloc mctx for pinned region failed\n");
- goto release;
- }
-
- list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
- }
-
- rgn->rgn_state = HPB_RGN_PINNED;
- return 0;
-
-release:
- for (i = 0; i < srgn_idx; i++) {
- srgn = rgn->srgn_tbl + i;
- ufshpb_put_map_ctx(hpb, srgn->mctx);
- }
- return err;
-}
-
-static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn, bool last)
-{
- int srgn_idx;
- struct ufshpb_subregion *srgn;
-
- for_each_sub_region(rgn, srgn_idx, srgn) {
- INIT_LIST_HEAD(&srgn->list_act_srgn);
-
- srgn->rgn_idx = rgn->rgn_idx;
- srgn->srgn_idx = srgn_idx;
- srgn->srgn_state = HPB_SRGN_UNUSED;
- }
-
- if (unlikely(last && hpb->last_srgn_entries))
- srgn->is_last = true;
-}
-
-static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn, int srgn_cnt)
-{
- rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
- GFP_KERNEL);
- if (!rgn->srgn_tbl)
- return -ENOMEM;
-
- rgn->srgn_cnt = srgn_cnt;
- return 0;
-}
-
-static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
- struct ufshpb_lu *hpb,
- struct ufshpb_dev_info *hpb_dev_info,
- struct ufshpb_lu_info *hpb_lu_info)
-{
- u32 entries_per_rgn;
- u64 rgn_mem_size, tmp;
-
- if (ufshpb_is_legacy(hba))
- hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
- else
- hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
-
- hpb->lu_pinned_start = hpb_lu_info->pinned_start;
- hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
- (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
- : PINNED_NOT_SET;
- hpb->lru_info.max_lru_active_cnt =
- hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
-
- rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
- * HPB_ENTRY_SIZE;
- do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
- hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
- * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
-
- tmp = rgn_mem_size;
- do_div(tmp, HPB_ENTRY_SIZE);
- entries_per_rgn = (u32)tmp;
- hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
- hpb->entries_per_rgn_mask = entries_per_rgn - 1;
-
- hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
- hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
- hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
-
- tmp = rgn_mem_size;
- do_div(tmp, hpb->srgn_mem_size);
- hpb->srgns_per_rgn = (int)tmp;
-
- hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
- entries_per_rgn);
- hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
- (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
- hpb->last_srgn_entries = hpb_lu_info->num_blocks
- % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
-
- hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
-
- if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
- hpb->is_hcm = true;
-}
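To make the geometry arithmetic in ufshpb_lu_parameter_init() concrete, here is a small stand-alone computation for one plausible configuration. The descriptor values (bHPBRegionSize = 15, bHPBSubRegionSize = 13, i.e. 16 MiB regions and 4 MiB subregions) are assumptions chosen purely for illustration; real devices report their own values:

#include <stdio.h>

#define HPB_RGN_SIZE_UNIT	512
#define HPB_ENTRY_BLOCK_SIZE	4096
#define HPB_ENTRY_SIZE		8

int main(void)
{
	unsigned int rgn_size = 15;	/* assumed: 512 B << 15 = 16 MiB region */
	unsigned int srgn_size = 13;	/* assumed: 512 B << 13 = 4 MiB subregion */

	unsigned long long rgn_mem_size =
		(1ULL << rgn_size) * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
	unsigned long long srgn_mem_size =
		(1ULL << srgn_size) * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;

	printf("rgn_mem_size     = %llu bytes\n", rgn_mem_size);		/* 32768 */
	printf("entries_per_rgn  = %llu\n", rgn_mem_size / HPB_ENTRY_SIZE);	/* 4096 */
	printf("entries_per_srgn = %llu\n", srgn_mem_size / HPB_ENTRY_SIZE);	/* 1024 */
	printf("srgns_per_rgn    = %llu\n", rgn_mem_size / srgn_mem_size);	/* 4 */
	printf("pages_per_srgn   = %llu\n", (srgn_mem_size + 4095) / 4096);	/* 2 */
	return 0;
}

With these assumed sizes the derived values would be entries_per_rgn_shift = 12 (mask 0xfff) and entries_per_srgn_shift = 10 (mask 0x3ff), which is what lets an LPN be split into region index, subregion index and in-subregion offset using only shifts and masks.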
-
-static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn_table, *rgn;
- int rgn_idx, i;
- int ret = 0;
-
- rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
- GFP_KERNEL);
- if (!rgn_table)
- return -ENOMEM;
-
- for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
- int srgn_cnt = hpb->srgns_per_rgn;
- bool last_srgn = false;
-
- rgn = rgn_table + rgn_idx;
- rgn->rgn_idx = rgn_idx;
-
- spin_lock_init(&rgn->rgn_lock);
-
- INIT_LIST_HEAD(&rgn->list_inact_rgn);
- INIT_LIST_HEAD(&rgn->list_lru_rgn);
- INIT_LIST_HEAD(&rgn->list_expired_rgn);
-
- if (rgn_idx == hpb->rgns_per_lu - 1) {
- srgn_cnt = ((hpb->srgns_per_lu - 1) %
- hpb->srgns_per_rgn) + 1;
- last_srgn = true;
- }
-
- ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
- if (ret)
- goto release_srgn_table;
- ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
-
- if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
- ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
- if (ret)
- goto release_srgn_table;
- } else {
- rgn->rgn_state = HPB_RGN_INACTIVE;
- }
-
- rgn->rgn_flags = 0;
- rgn->hpb = hpb;
- }
-
- hpb->rgn_tbl = rgn_table;
-
- return 0;
-
-release_srgn_table:
- for (i = 0; i <= rgn_idx; i++)
- kvfree(rgn_table[i].srgn_tbl);
-
- kvfree(rgn_table);
- return ret;
-}
-
-static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- int srgn_idx;
- struct ufshpb_subregion *srgn;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- if (srgn->srgn_state != HPB_SRGN_UNUSED) {
- srgn->srgn_state = HPB_SRGN_UNUSED;
- ufshpb_put_map_ctx(hpb, srgn->mctx);
- }
-}
-
-static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
-{
- int rgn_idx;
-
- for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
- struct ufshpb_region *rgn;
-
- rgn = hpb->rgn_tbl + rgn_idx;
- if (rgn->rgn_state != HPB_RGN_INACTIVE) {
- rgn->rgn_state = HPB_RGN_INACTIVE;
-
- ufshpb_destroy_subregion_tbl(hpb, rgn);
- }
-
- kvfree(rgn->srgn_tbl);
- }
-
- kvfree(hpb->rgn_tbl);
-}
-
-/* SYSFS functions */
-#define ufshpb_sysfs_attr_show_func(__name) \
-static ssize_t __name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct scsi_device *sdev = to_scsi_device(dev); \
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
- \
- if (!hpb) \
- return -ENODEV; \
- \
- return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \
-} \
-\
-static DEVICE_ATTR_RO(__name)
-
-ufshpb_sysfs_attr_show_func(hit_cnt);
-ufshpb_sysfs_attr_show_func(miss_cnt);
-ufshpb_sysfs_attr_show_func(rcmd_noti_cnt);
-ufshpb_sysfs_attr_show_func(rcmd_active_cnt);
-ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt);
-ufshpb_sysfs_attr_show_func(map_req_cnt);
-ufshpb_sysfs_attr_show_func(umap_req_cnt);
-
-static struct attribute *hpb_dev_stat_attrs[] = {
- &dev_attr_hit_cnt.attr,
- &dev_attr_miss_cnt.attr,
- &dev_attr_rcmd_noti_cnt.attr,
- &dev_attr_rcmd_active_cnt.attr,
- &dev_attr_rcmd_inactive_cnt.attr,
- &dev_attr_map_req_cnt.attr,
- &dev_attr_umap_req_cnt.attr,
- NULL,
-};
-
-struct attribute_group ufs_sysfs_hpb_stat_group = {
- .name = "hpb_stats",
- .attrs = hpb_dev_stat_attrs,
-};
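For reference, expanding the ufshpb_sysfs_attr_show_func() macro above for one counter shows the shape of the generated attribute. This is roughly the preprocessed form of the existing macro, written out for readability; the counter is then readable from the LU's SCSI device sysfs directory (for example something like /sys/class/scsi_device/<H:C:T:L>/device/hpb_stats/hit_cnt, the exact path being an assumption here):

/* ufshpb_sysfs_attr_show_func(hit_cnt) expands roughly to: */
static ssize_t hit_cnt_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (!hpb)
		return -ENODEV;

	return sysfs_emit(buf, "%llu\n", hpb->stats.hit_cnt);
}
static DEVICE_ATTR_RO(hit_cnt);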
-
-/* SYSFS functions */
-#define ufshpb_sysfs_param_show_func(__name) \
-static ssize_t __name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct scsi_device *sdev = to_scsi_device(dev); \
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
- \
- if (!hpb) \
- return -ENODEV; \
- \
- return sysfs_emit(buf, "%d\n", hpb->params.__name); \
-}
-
-ufshpb_sysfs_param_show_func(requeue_timeout_ms);
-static ssize_t
-requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val < 0)
- return -EINVAL;
-
- hpb->params.requeue_timeout_ms = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(requeue_timeout_ms);
-
-ufshpb_sysfs_param_show_func(activation_thld);
-static ssize_t
-activation_thld_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0)
- return -EINVAL;
-
- hpb->params.activation_thld = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(activation_thld);
-
-ufshpb_sysfs_param_show_func(normalization_factor);
-static ssize_t
-normalization_factor_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
- return -EINVAL;
-
- hpb->params.normalization_factor = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(normalization_factor);
-
-ufshpb_sysfs_param_show_func(eviction_thld_enter);
-static ssize_t
-eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= hpb->params.eviction_thld_exit)
- return -EINVAL;
-
- hpb->params.eviction_thld_enter = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(eviction_thld_enter);
-
-ufshpb_sysfs_param_show_func(eviction_thld_exit);
-static ssize_t
-eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= hpb->params.activation_thld)
- return -EINVAL;
-
- hpb->params.eviction_thld_exit = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(eviction_thld_exit);
-
-ufshpb_sysfs_param_show_func(read_timeout_ms);
-static ssize_t
-read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- /* read_timeout >> timeout_polling_interval */
- if (val < hpb->params.timeout_polling_interval_ms * 2)
- return -EINVAL;
-
- hpb->params.read_timeout_ms = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(read_timeout_ms);
-
-ufshpb_sysfs_param_show_func(read_timeout_expiries);
-static ssize_t
-read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0)
- return -EINVAL;
-
- hpb->params.read_timeout_expiries = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(read_timeout_expiries);
-
-ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
-static ssize_t
-timeout_polling_interval_ms_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- /* timeout_polling_interval << read_timeout */
- if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
- return -EINVAL;
-
- hpb->params.timeout_polling_interval_ms = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(timeout_polling_interval_ms);
-
-ufshpb_sysfs_param_show_func(inflight_map_req);
-static ssize_t inflight_map_req_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
- return -EINVAL;
-
- hpb->params.inflight_map_req = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(inflight_map_req);
-
-static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
-{
- hpb->params.activation_thld = ACTIVATION_THRESHOLD;
- hpb->params.normalization_factor = 1;
- hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
- hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
- hpb->params.read_timeout_ms = READ_TO_MS;
- hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
- hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
- hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
-}
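A quick sanity check of the defaults above, assuming only what the code states (eviction_thld_enter = thld << 5, eviction_thld_exit = thld << 4): the ordering enter > exit > activation_thld holds for any positive ACTIVATION_THRESHOLD, which is the same invariant the eviction_thld_enter/eviction_thld_exit store handlers above enforce. The value 4 below is hypothetical, used only to make the check concrete:

#include <assert.h>

int main(void)
{
	unsigned int thld = 4;		/* hypothetical ACTIVATION_THRESHOLD */
	unsigned int enter = thld << 5;	/* 128 */
	unsigned int exit_ = thld << 4;	/* 64  */

	assert(enter > exit_ && exit_ > thld);
	return 0;
}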
-
-static struct attribute *hpb_dev_param_attrs[] = {
- &dev_attr_requeue_timeout_ms.attr,
- &dev_attr_activation_thld.attr,
- &dev_attr_normalization_factor.attr,
- &dev_attr_eviction_thld_enter.attr,
- &dev_attr_eviction_thld_exit.attr,
- &dev_attr_read_timeout_ms.attr,
- &dev_attr_read_timeout_expiries.attr,
- &dev_attr_timeout_polling_interval_ms.attr,
- &dev_attr_inflight_map_req.attr,
- NULL,
-};
-
-struct attribute_group ufs_sysfs_hpb_param_group = {
- .name = "hpb_params",
- .attrs = hpb_dev_param_attrs,
-};
-
-static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
-{
- struct ufshpb_req *pre_req = NULL, *t;
- int qd = hpb->sdev_ufs_lu->queue_depth / 2;
- int i;
-
- INIT_LIST_HEAD(&hpb->lh_pre_req_free);
-
- hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
- hpb->throttle_pre_req = qd;
- hpb->num_inflight_pre_req = 0;
-
- if (!hpb->pre_req)
- goto release_mem;
-
- for (i = 0; i < qd; i++) {
- pre_req = hpb->pre_req + i;
- INIT_LIST_HEAD(&pre_req->list_req);
- pre_req->req = NULL;
-
- pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
- if (!pre_req->bio)
- goto release_mem;
-
- pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!pre_req->wb.m_page) {
- bio_put(pre_req->bio);
- goto release_mem;
- }
-
- list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
- }
-
- return 0;
-release_mem:
- list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
- list_del_init(&pre_req->list_req);
- bio_put(pre_req->bio);
- __free_page(pre_req->wb.m_page);
- }
-
- kfree(hpb->pre_req);
- return -ENOMEM;
-}
-
-static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
-{
- struct ufshpb_req *pre_req = NULL;
- int i;
-
- for (i = 0; i < hpb->throttle_pre_req; i++) {
- pre_req = hpb->pre_req + i;
- bio_put(hpb->pre_req[i].bio);
- if (!pre_req->wb.m_page)
- __free_page(hpb->pre_req[i].wb.m_page);
- list_del_init(&pre_req->list_req);
- }
-
- kfree(hpb->pre_req);
-}
-
-static void ufshpb_stat_init(struct ufshpb_lu *hpb)
-{
- hpb->stats.hit_cnt = 0;
- hpb->stats.miss_cnt = 0;
- hpb->stats.rcmd_noti_cnt = 0;
- hpb->stats.rcmd_active_cnt = 0;
- hpb->stats.rcmd_inactive_cnt = 0;
- hpb->stats.map_req_cnt = 0;
- hpb->stats.umap_req_cnt = 0;
-}
-
-static void ufshpb_param_init(struct ufshpb_lu *hpb)
-{
- hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
- if (hpb->is_hcm)
- ufshpb_hcm_param_init(hpb);
-}
-
-static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
-{
- int ret;
-
- spin_lock_init(&hpb->rgn_state_lock);
- spin_lock_init(&hpb->rsp_list_lock);
- spin_lock_init(&hpb->param_lock);
-
- INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
- INIT_LIST_HEAD(&hpb->lh_act_srgn);
- INIT_LIST_HEAD(&hpb->lh_inact_rgn);
- INIT_LIST_HEAD(&hpb->list_hpb_lu);
-
- INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
- if (hpb->is_hcm) {
- INIT_WORK(&hpb->ufshpb_normalization_work,
- ufshpb_normalization_work_handler);
- INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
- ufshpb_read_to_handler);
- }
-
- hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
- sizeof(struct ufshpb_req), 0, 0, NULL);
- if (!hpb->map_req_cache) {
- dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
- hpb->lun);
- return -ENOMEM;
- }
-
- hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
- sizeof(struct page *) * hpb->pages_per_srgn,
- 0, 0, NULL);
- if (!hpb->m_page_cache) {
- dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
- hpb->lun);
- ret = -ENOMEM;
- goto release_req_cache;
- }
-
- ret = ufshpb_pre_req_mempool_init(hpb);
- if (ret) {
- dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
- hpb->lun);
- goto release_m_page_cache;
- }
-
- ret = ufshpb_alloc_region_tbl(hba, hpb);
- if (ret)
- goto release_pre_req_mempool;
-
- ufshpb_stat_init(hpb);
- ufshpb_param_init(hpb);
-
- if (hpb->is_hcm) {
- unsigned int poll;
-
- poll = hpb->params.timeout_polling_interval_ms;
- schedule_delayed_work(&hpb->ufshpb_read_to_work,
- msecs_to_jiffies(poll));
- }
-
- return 0;
-
-release_pre_req_mempool:
- ufshpb_pre_req_mempool_destroy(hpb);
-release_m_page_cache:
- kmem_cache_destroy(hpb->m_page_cache);
-release_req_cache:
- kmem_cache_destroy(hpb->map_req_cache);
- return ret;
-}
-
-static struct ufshpb_lu *
-ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
- struct ufshpb_dev_info *hpb_dev_info,
- struct ufshpb_lu_info *hpb_lu_info)
-{
- struct ufshpb_lu *hpb;
- int ret;
-
- hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
- if (!hpb)
- return NULL;
-
- hpb->lun = sdev->lun;
- hpb->sdev_ufs_lu = sdev;
-
- ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
-
- ret = ufshpb_lu_hpb_init(hba, hpb);
- if (ret) {
- dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
- goto release_hpb;
- }
-
- sdev->hostdata = hpb;
- return hpb;
-
-release_hpb:
- kfree(hpb);
- return NULL;
-}
-
-static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn, *next_rgn;
- struct ufshpb_subregion *srgn, *next_srgn;
- unsigned long flags;
-
- /*
- * If the device reset occurred, the remaining HPB region information
- * may be stale. Therefore, by discarding the lists of HPB response
- * that remained after reset, we prevent unnecessary work.
- */
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
- list_inact_rgn)
- list_del_init(&rgn->list_inact_rgn);
-
- list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
- list_act_srgn)
- list_del_init(&srgn->list_act_srgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-}
-
-static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
-{
- if (hpb->is_hcm) {
- cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
- cancel_work_sync(&hpb->ufshpb_normalization_work);
- }
- cancel_work_sync(&hpb->map_work);
-}
-
-static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
-{
- int err = 0;
- bool flag_res = true;
- int try;
-
- /* wait for the device to complete HPB reset query */
- for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
- dev_dbg(hba->dev,
- "%s: start flag reset polling %d times\n",
- __func__, try);
-
- /* Poll fHpbReset flag to be cleared */
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
-
- if (err) {
- dev_err(hba->dev,
- "%s: reading fHpbReset flag failed with error %d\n",
- __func__, err);
- return flag_res;
- }
-
- if (!flag_res)
- goto out;
-
- usleep_range(1000, 1100);
- }
- if (flag_res) {
- dev_err(hba->dev,
- "%s: fHpbReset was not cleared by the device\n",
- __func__);
- }
-out:
- return flag_res;
-}
-
-/**
- * ufshpb_toggle_state - switch HPB state of all LUs
- * @hba: per-adapter instance
- * @src: expected current HPB state
- * @dest: target HPB state to switch to
- */
-void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
-
- if (!hpb || ufshpb_get_state(hpb) != src)
- continue;
- ufshpb_set_state(hpb, dest);
-
- if (dest == HPB_RESET) {
- ufshpb_cancel_jobs(hpb);
- ufshpb_discard_rsp_lists(hpb);
- }
- }
-}
-
-void ufshpb_suspend(struct ufs_hba *hba)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT)
- continue;
-
- ufshpb_set_state(hpb, HPB_SUSPEND);
- ufshpb_cancel_jobs(hpb);
- }
-}
-
-void ufshpb_resume(struct ufs_hba *hba)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND)
- continue;
-
- ufshpb_set_state(hpb, HPB_PRESENT);
- ufshpb_kick_map_work(hpb);
- if (hpb->is_hcm) {
- unsigned int poll = hpb->params.timeout_polling_interval_ms;
-
- schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll));
- }
- }
-}
-
-static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
- struct ufshpb_lu_info *hpb_lu_info)
-{
- u16 max_active_rgns;
- u8 lu_enable;
- int size = QUERY_DESC_MAX_SIZE;
- int ret;
- char desc_buf[QUERY_DESC_MAX_SIZE];
-
- ufshcd_rpm_get_sync(hba);
- ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- QUERY_DESC_IDN_UNIT, lun, 0,
- desc_buf, &size);
- ufshcd_rpm_put_sync(hba);
-
- if (ret) {
- dev_err(hba->dev,
- "%s: idn: %d lun: %d query request failed",
- __func__, QUERY_DESC_IDN_UNIT, lun);
- return ret;
- }
-
- lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
- if (lu_enable != LU_ENABLED_HPB_FUNC)
- return -ENODEV;
-
- max_active_rgns = get_unaligned_be16(
- desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
- if (!max_active_rgns) {
- dev_err(hba->dev,
- "lun %d wrong number of max active regions\n", lun);
- return -ENODEV;
- }
-
- hpb_lu_info->num_blocks = get_unaligned_be64(
- desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
- hpb_lu_info->pinned_start = get_unaligned_be16(
- desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
- hpb_lu_info->num_pinned = get_unaligned_be16(
- desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
- hpb_lu_info->max_active_rgns = max_active_rgns;
-
- return 0;
-}
-
-void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
-
- if (!hpb)
- return;
-
- ufshpb_set_state(hpb, HPB_FAILED);
-
- sdev = hpb->sdev_ufs_lu;
- sdev->hostdata = NULL;
-
- ufshpb_cancel_jobs(hpb);
-
- ufshpb_pre_req_mempool_destroy(hpb);
- ufshpb_destroy_region_tbl(hpb);
-
- kmem_cache_destroy(hpb->map_req_cache);
- kmem_cache_destroy(hpb->m_page_cache);
-
- list_del_init(&hpb->list_hpb_lu);
-
- kfree(hpb);
-}
-
-static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
-{
- int pool_size;
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
- bool init_success;
-
- if (tot_active_srgn_pages == 0) {
- ufshpb_remove(hba);
- return;
- }
-
- init_success = !ufshpb_check_hpb_reset_query(hba);
-
- pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
- if (pool_size > tot_active_srgn_pages) {
- mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
- mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
- }
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb)
- continue;
-
- if (init_success) {
- ufshpb_set_state(hpb, HPB_PRESENT);
- if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
- queue_work(ufshpb_wq, &hpb->map_work);
- } else {
- dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
- ufshpb_destroy_lu(hba, sdev);
- }
- }
-
- if (!init_success)
- ufshpb_remove(hba);
-}
-
-void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- struct ufshpb_lu *hpb;
- int ret;
- struct ufshpb_lu_info hpb_lu_info = { 0 };
- int lun = sdev->lun;
-
- if (lun >= hba->dev_info.max_lu_supported)
- goto out;
-
- ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
- if (ret)
- goto out;
-
- hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
- &hpb_lu_info);
- if (!hpb)
- goto out;
-
- tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
- hpb->srgns_per_rgn * hpb->pages_per_srgn;
-
-out:
-	/* Complete HPB setup once all LUs have been initialized */
- if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
- ufshpb_hpb_lu_prepared(hba);
-}
-
-static int ufshpb_init_mem_wq(struct ufs_hba *hba)
-{
- int ret;
- unsigned int pool_size;
-
- ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
- sizeof(struct ufshpb_map_ctx),
- 0, 0, NULL);
- if (!ufshpb_mctx_cache) {
- dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
- return -ENOMEM;
- }
-
- pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
- dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
- __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
-
- ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
- ufshpb_mctx_cache);
- if (!ufshpb_mctx_pool) {
- dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
- ret = -ENOMEM;
- goto release_mctx_cache;
- }
-
- ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
- if (!ufshpb_page_pool) {
- dev_err(hba->dev, "ufshpb: cannot init page pool\n");
- ret = -ENOMEM;
- goto release_mctx_pool;
- }
-
- ufshpb_wq = alloc_workqueue("ufshpb-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
- if (!ufshpb_wq) {
- dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
- ret = -ENOMEM;
- goto release_page_pool;
- }
-
- return 0;
-
-release_page_pool:
- mempool_destroy(ufshpb_page_pool);
-release_mctx_pool:
- mempool_destroy(ufshpb_mctx_pool);
-release_mctx_cache:
- kmem_cache_destroy(ufshpb_mctx_cache);
- return ret;
-}
-
-void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
-{
- struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
- int max_active_rgns = 0;
- int hpb_num_lu;
-
- hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
- if (hpb_num_lu == 0) {
- dev_err(hba->dev, "No HPB LU supported\n");
- hpb_info->hpb_disabled = true;
- return;
- }
-
- hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
- hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
- max_active_rgns = get_unaligned_be16(geo_buf +
- GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
-
- if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
- max_active_rgns == 0) {
- dev_err(hba->dev, "No HPB supported device\n");
- hpb_info->hpb_disabled = true;
- return;
- }
-}
-
-void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
-{
- struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
- int version, ret;
- int max_single_cmd;
-
- hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
-
- version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
- if ((version != HPB_SUPPORT_VERSION) &&
- (version != HPB_SUPPORT_LEGACY_VERSION)) {
- dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
- __func__, version);
- hpb_dev_info->hpb_disabled = true;
- return;
- }
-
- if (version == HPB_SUPPORT_LEGACY_VERSION)
- hpb_dev_info->is_legacy = true;
-
-	/*
-	 * Get the number of user logical units to check whether all
-	 * scsi_devices have finished initialization.
-	 */
- hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
-
- if (hpb_dev_info->is_legacy)
- return;
-
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
-
- if (ret)
- hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
- else
- hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
-}
-
-void ufshpb_init(struct ufs_hba *hba)
-{
- struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
- int try;
- int ret;
-
- if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
- return;
-
- if (ufshpb_init_mem_wq(hba)) {
- hpb_dev_info->hpb_disabled = true;
- return;
- }
-
- atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
- tot_active_srgn_pages = 0;
- /* issue HPB reset query */
- for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
- ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
- if (!ret)
- break;
- }
-}
-
-void ufshpb_remove(struct ufs_hba *hba)
-{
- mempool_destroy(ufshpb_page_pool);
- mempool_destroy(ufshpb_mctx_pool);
- kmem_cache_destroy(ufshpb_mctx_cache);
-
- destroy_workqueue(ufshpb_wq);
-}
-
-module_param(ufshpb_host_map_kbytes, uint, 0644);
-MODULE_PARM_DESC(ufshpb_host_map_kbytes,
-	"kilobytes of host mapping memory for the ufshpb memory pool");
diff --git a/drivers/ufs/core/ufshpb.h b/drivers/ufs/core/ufshpb.h
deleted file mode 100644
index b428bbdd2799..000000000000
--- a/drivers/ufs/core/ufshpb.h
+++ /dev/null
@@ -1,318 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Universal Flash Storage Host Performance Booster
- *
- * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * Yongmyung Lee <ymhungry.lee@samsung.com>
- * Jinyoung Choi <j-young.choi@samsung.com>
- */
-
-#ifndef _UFSHPB_H_
-#define _UFSHPB_H_
-
-/* hpb response UPIU macro */
-#define HPB_RSP_NONE 0x0
-#define HPB_RSP_REQ_REGION_UPDATE 0x1
-#define HPB_RSP_DEV_RESET 0x2
-#define MAX_ACTIVE_NUM 2
-#define MAX_INACTIVE_NUM 2
-#define DEV_DATA_SEG_LEN 0x14
-#define DEV_SENSE_SEG_LEN 0x12
-#define DEV_DES_TYPE 0x80
-#define DEV_ADDITIONAL_LEN 0x10
-
-/* hpb map & entries macro */
-#define HPB_RGN_SIZE_UNIT 512
-#define HPB_ENTRY_BLOCK_SIZE SZ_4K
-#define HPB_ENTRY_SIZE 0x8
-#define PINNED_NOT_SET U32_MAX
-
-/* hpb support chunk size */
-#define HPB_LEGACY_CHUNK_HIGH 1
-#define HPB_MULTI_CHUNK_HIGH 255
-
-/* hpb vendor defined opcodes */
-#define UFSHPB_READ 0xF8
-#define UFSHPB_READ_BUFFER 0xF9
-#define UFSHPB_READ_BUFFER_ID 0x01
-#define UFSHPB_WRITE_BUFFER 0xFA
-#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID 0x01
-#define UFSHPB_WRITE_BUFFER_PREFETCH_ID 0x02
-#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID 0x03
-#define HPB_WRITE_BUFFER_CMD_LENGTH 10
-#define MAX_HPB_READ_ID 0x7F
-#define HPB_READ_BUFFER_CMD_LENGTH 10
-#define LU_ENABLED_HPB_FUNC 0x02
-
-#define HPB_RESET_REQ_RETRIES 10
-#define HPB_MAP_REQ_RETRIES 5
-#define HPB_REQUEUE_TIME_MS 0
-
-#define HPB_SUPPORT_VERSION 0x200
-#define HPB_SUPPORT_LEGACY_VERSION 0x100
-
-enum UFSHPB_MODE {
- HPB_HOST_CONTROL,
- HPB_DEVICE_CONTROL,
-};
-
-enum UFSHPB_STATE {
- HPB_INIT,
- HPB_PRESENT,
- HPB_SUSPEND,
- HPB_FAILED,
- HPB_RESET,
-};
-
-enum HPB_RGN_STATE {
- HPB_RGN_INACTIVE,
- HPB_RGN_ACTIVE,
- /* pinned regions are always active */
- HPB_RGN_PINNED,
-};
-
-enum HPB_SRGN_STATE {
- HPB_SRGN_UNUSED,
- HPB_SRGN_INVALID,
- HPB_SRGN_VALID,
- HPB_SRGN_ISSUED,
-};
-
-/**
- * struct ufshpb_lu_info - UFSHPB logical unit related info
- * @num_blocks: the number of logical blocks
- * @pinned_start: the index of the first pinned region
- * @num_pinned: the number of pinned regions
- * @max_active_rgns: maximum number of active regions
- */
-struct ufshpb_lu_info {
- int num_blocks;
- int pinned_start;
- int num_pinned;
- int max_active_rgns;
-};
-
-struct ufshpb_map_ctx {
- struct page **m_page;
- unsigned long *ppn_dirty;
-};
-
-struct ufshpb_subregion {
- struct ufshpb_map_ctx *mctx;
- enum HPB_SRGN_STATE srgn_state;
- int rgn_idx;
- int srgn_idx;
- bool is_last;
-
- /* subregion reads - for host mode */
- unsigned int reads;
-
- /* below information is used by rsp_list */
- struct list_head list_act_srgn;
-};
-
-struct ufshpb_region {
- struct ufshpb_lu *hpb;
- struct ufshpb_subregion *srgn_tbl;
- enum HPB_RGN_STATE rgn_state;
- int rgn_idx;
- int srgn_cnt;
-
- /* below information is used by rsp_list */
- struct list_head list_inact_rgn;
-
- /* below information is used by lru */
- struct list_head list_lru_rgn;
- unsigned long rgn_flags;
-#define RGN_FLAG_DIRTY 0
-#define RGN_FLAG_UPDATE 1
-
- /* region reads - for host mode */
- spinlock_t rgn_lock;
- unsigned int reads;
- /* region "cold" timer - for host mode */
- ktime_t read_timeout;
- unsigned int read_timeout_expiries;
- struct list_head list_expired_rgn;
-};
-
-#define for_each_sub_region(rgn, i, srgn) \
- for ((i) = 0; \
- ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \
- (i)++)
-
-/**
- * struct ufshpb_req - HPB related request structure (write/read buffer)
- * @req: block layer request structure
- * @bio: bio for this request
- * @hpb: ufshpb_lu structure that this request belongs to
- * @list_req: ufshpb_req mempool list
- * @mctx: L2P map information
- * @rgn_idx: target region index
- * @srgn_idx: target sub-region index
- * @lun: target logical unit number
- * @m_page: L2P map information data for pre-request
- * @len: length of host-side cached L2P map in m_page
- * @lpn: start LPN of L2P map in m_page
- */
-struct ufshpb_req {
- struct request *req;
- struct bio *bio;
- struct ufshpb_lu *hpb;
- struct list_head list_req;
- union {
- struct {
- struct ufshpb_map_ctx *mctx;
- unsigned int rgn_idx;
- unsigned int srgn_idx;
- unsigned int lun;
- } rb;
- struct {
- struct page *m_page;
- unsigned int len;
- unsigned long lpn;
- } wb;
- };
-};
-
-struct victim_select_info {
- struct list_head lh_lru_rgn; /* LRU list of regions */
- int max_lru_active_cnt; /* supported hpb #region - pinned #region */
- atomic_t active_cnt;
-};
-
-/**
- * struct ufshpb_params - UFS HPB parameters
- * @requeue_timeout_ms: requeue threshold of wb command (0x2)
- * @activation_thld: min reads [IOs] to activate/update a region
- * @normalization_factor: shift right the region's reads
- * @eviction_thld_enter: min reads [IOs] for the entering region in eviction
- * @eviction_thld_exit: max reads [IOs] for the exiting region in eviction
- * @read_timeout_ms: timeout [ms] from the last read IO to the region
- * @read_timeout_expiries: number of allowed timeout expiries
- * @timeout_polling_interval_ms: frequency at which timeouts are checked
- * @inflight_map_req: number of inflight map requests
- */
-struct ufshpb_params {
- unsigned int requeue_timeout_ms;
- unsigned int activation_thld;
- unsigned int normalization_factor;
- unsigned int eviction_thld_enter;
- unsigned int eviction_thld_exit;
- unsigned int read_timeout_ms;
- unsigned int read_timeout_expiries;
- unsigned int timeout_polling_interval_ms;
- unsigned int inflight_map_req;
-};
-
-struct ufshpb_stats {
- u64 hit_cnt;
- u64 miss_cnt;
- u64 rcmd_noti_cnt;
- u64 rcmd_active_cnt;
- u64 rcmd_inactive_cnt;
- u64 map_req_cnt;
- u64 pre_req_cnt;
- u64 umap_req_cnt;
-};
-
-struct ufshpb_lu {
- int lun;
- struct scsi_device *sdev_ufs_lu;
-
-	spinlock_t rgn_state_lock; /* protects rgn/srgn state */
- struct ufshpb_region *rgn_tbl;
-
- atomic_t hpb_state;
-
- spinlock_t rsp_list_lock;
- struct list_head lh_act_srgn; /* hold rsp_list_lock */
- struct list_head lh_inact_rgn; /* hold rsp_list_lock */
-
- /* pre request information */
- struct ufshpb_req *pre_req;
- int num_inflight_pre_req;
- int throttle_pre_req;
- int num_inflight_map_req; /* hold param_lock */
- spinlock_t param_lock;
-
- struct list_head lh_pre_req_free;
- int pre_req_max_tr_len;
-
- /* cached L2P map management worker */
- struct work_struct map_work;
-
- /* for selecting victim */
- struct victim_select_info lru_info;
- struct work_struct ufshpb_normalization_work;
- struct delayed_work ufshpb_read_to_work;
- unsigned long work_data_bits;
-#define TIMEOUT_WORK_RUNNING 0
-
- /* pinned region information */
- u32 lu_pinned_start;
- u32 lu_pinned_end;
-
- /* HPB related configuration */
- u32 rgns_per_lu;
- u32 srgns_per_lu;
- u32 last_srgn_entries;
- int srgns_per_rgn;
- u32 srgn_mem_size;
- u32 entries_per_rgn_mask;
- u32 entries_per_rgn_shift;
- u32 entries_per_srgn;
- u32 entries_per_srgn_mask;
- u32 entries_per_srgn_shift;
- u32 pages_per_srgn;
-
- bool is_hcm;
-
- struct ufshpb_stats stats;
- struct ufshpb_params params;
-
- struct kmem_cache *map_req_cache;
- struct kmem_cache *m_page_cache;
-
- struct list_head list_hpb_lu;
-};
-
-struct ufs_hba;
-struct ufshcd_lrb;
-
-#ifndef CONFIG_SCSI_UFS_HPB
-static int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0; }
-static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
-static void ufshpb_resume(struct ufs_hba *hba) {}
-static void ufshpb_suspend(struct ufs_hba *hba) {}
-static void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest) {}
-static void ufshpb_init(struct ufs_hba *hba) {}
-static void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
-static void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
-static void ufshpb_remove(struct ufs_hba *hba) {}
-static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; }
-static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {}
-static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {}
-static bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; }
-#else
-int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
-void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
-void ufshpb_resume(struct ufs_hba *hba);
-void ufshpb_suspend(struct ufs_hba *hba);
-void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest);
-void ufshpb_init(struct ufs_hba *hba);
-void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
-void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
-void ufshpb_remove(struct ufs_hba *hba);
-bool ufshpb_is_allowed(struct ufs_hba *hba);
-void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf);
-void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf);
-bool ufshpb_is_legacy(struct ufs_hba *hba);
-extern struct attribute_group ufs_sysfs_hpb_stat_group;
-extern struct attribute_group ufs_sysfs_hpb_param_group;
-#endif
-
-#endif /* End of Header */
diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
index 26761425a76c..2491e7e87028 100644
--- a/drivers/ufs/host/cdns-pltfrm.c
+++ b/drivers/ufs/host/cdns-pltfrm.c
@@ -101,11 +101,10 @@ static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
}
/**
- * cdns_ufs_set_hclkdiv()
- * Sets HCLKDIV register value based on the core_clk
+ * cdns_ufs_set_hclkdiv() - set HCLKDIV register value based on the core_clk.
* @hba: host controller instance
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
{
@@ -143,12 +142,11 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
}
/**
- * cdns_ufs_hce_enable_notify()
- * Called before and after HCE enable bit is set.
+ * cdns_ufs_hce_enable_notify() - set HCLKDIV register
* @hba: host controller instance
* @status: notify stage (pre, post change)
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
@@ -160,12 +158,10 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
}
/**
- * cdns_ufs_hibern8_notify()
- * Called around hibern8 enter/exit.
+ * cdns_ufs_hibern8_notify() - save and restore L4 attributes.
* @hba: host controller instance
* @cmd: UIC Command
* @status: notify stage (pre, post change)
- *
*/
static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
enum ufs_notify_change_status status)
@@ -177,12 +173,11 @@ static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
}
/**
- * cdns_ufs_link_startup_notify()
- * Called before and after Link startup is carried out.
+ * cdns_ufs_link_startup_notify() - handle link startup.
* @hba: host controller instance
* @status: notify stage (pre, post change)
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
@@ -212,7 +207,7 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
* cdns_ufs_init - performs additional ufs initialization
* @hba: host controller instance
*
- * Returns status of initialization
+ * Return: status of initialization.
*/
static int cdns_ufs_init(struct ufs_hba *hba)
{
@@ -235,7 +230,7 @@ static int cdns_ufs_init(struct ufs_hba *hba)
* cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization
* @hba: host controller instance
*
- * Always returns 0
+ * Return: 0 (success).
*/
static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
{
@@ -284,7 +279,7 @@ MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
* cdns_ufs_pltfrm_probe - probe routine of the driver
* @pdev: pointer to platform device handle
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
{
@@ -308,7 +303,7 @@ static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
* cdns_ufs_pltfrm_remove - removes the ufs driver
* @pdev: pointer to platform device handle
*
- * Always returns 0
+ * Return: 0 (success).
*/
static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
{
diff --git a/drivers/ufs/host/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c
index f96fe5855841..876781fd6861 100644
--- a/drivers/ufs/host/tc-dwc-g210-pci.c
+++ b/drivers/ufs/host/tc-dwc-g210-pci.c
@@ -51,7 +51,7 @@ static void tc_dwc_g210_pci_remove(struct pci_dev *pdev)
* @pdev: pointer to PCI device handle
* @id: PCI device id
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int
tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/ufs/host/tc-dwc-g210.c b/drivers/ufs/host/tc-dwc-g210.c
index deb93dbd83a4..0ac53cc8465e 100644
--- a/drivers/ufs/host/tc-dwc-g210.c
+++ b/drivers/ufs/host/tc-dwc-g210.c
@@ -17,11 +17,10 @@
#include "tc-dwc-g210.h"
/**
- * tc_dwc_g210_setup_40bit_rmmi()
- * This function configures Synopsys TC specific atributes (40-bit RMMI)
+ * tc_dwc_g210_setup_40bit_rmmi() - configure 40-bit RMMI.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success or non-zero value on failure
+ * Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
{
@@ -81,11 +80,10 @@ static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
}
/**
- * tc_dwc_g210_setup_20bit_rmmi_lane0()
- * This function configures Synopsys TC 20-bit RMMI Lane 0
+ * tc_dwc_g210_setup_20bit_rmmi_lane0() - configure 20-bit RMMI Lane 0.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success or non-zero value on failure
+ * Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
{
@@ -134,11 +132,10 @@ static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
}
/**
- * tc_dwc_g210_setup_20bit_rmmi_lane1()
- * This function configures Synopsys TC 20-bit RMMI Lane 1
+ * tc_dwc_g210_setup_20bit_rmmi_lane1() - configure 20-bit RMMI Lane 1.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success or non-zero value on failure
+ * Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
{
@@ -211,11 +208,10 @@ out:
}
/**
- * tc_dwc_g210_setup_20bit_rmmi()
- * This function configures Synopsys TC specific atributes (20-bit RMMI)
+ * tc_dwc_g210_setup_20bit_rmmi() - configure 20-bit RMMI.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success or non-zero value on failure
+ * Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
{
@@ -251,12 +247,10 @@ out:
}
/**
- * tc_dwc_g210_config_40_bit()
- * This function configures Local (host) Synopsys 40-bit TC specific attributes
- *
+ * tc_dwc_g210_config_40_bit() - configure 40-bit TC specific attributes.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success non-zero value on failure
+ * Return: 0 on success non-zero value on failure.
*/
int tc_dwc_g210_config_40_bit(struct ufs_hba *hba)
{
@@ -283,12 +277,10 @@ out:
EXPORT_SYMBOL(tc_dwc_g210_config_40_bit);
/**
- * tc_dwc_g210_config_20_bit()
- * This function configures Local (host) Synopsys 20-bit TC specific attributes
- *
+ * tc_dwc_g210_config_20_bit() - configure 20-bit TC specific attributes.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success non-zero value on failure
+ * Return: 0 on success non-zero value on failure.
*/
int tc_dwc_g210_config_20_bit(struct ufs_hba *hba)
{
diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c
index 122d650d0810..117eb7da92ac 100644
--- a/drivers/ufs/host/ti-j721e-ufs.c
+++ b/drivers/ufs/host/ti-j721e-ufs.c
@@ -81,6 +81,8 @@ static const struct of_device_id ti_j721e_ufs_of_match[] = {
{ },
};
+MODULE_DEVICE_TABLE(of, ti_j721e_ufs_of_match);
+
static struct platform_driver ti_j721e_ufs_driver = {
.probe = ti_j721e_ufs_probe,
.remove = ti_j721e_ufs_remove,
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index e68b05976f9e..2383ecd88f1c 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -14,6 +14,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
@@ -27,8 +28,14 @@
#include <ufs/unipro.h>
#include "ufs-mediatek.h"
+static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
+
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
+#undef CREATE_TRACE_POINTS
+
+#define MAX_SUPP_MAC 64
+#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
{ .wmanufacturerid = UFS_ANY_VENDOR,
@@ -659,7 +666,7 @@ static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
- * Returns 0 on success, non-zero on failure.
+ * Return: 0 on success, non-zero on failure.
*/
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
@@ -840,6 +847,37 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
}
}
+static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct platform_device *pdev;
+ int i;
+ int irq;
+
+ host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
+ pdev = container_of(hba->dev, struct platform_device, dev);
+
+ for (i = 0; i < host->mcq_nr_intr; i++) {
+ /* irq index 0 is legacy irq, sq/cq irq start from index 1 */
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0) {
+ host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
+ goto failed;
+ }
+ host->mcq_intr_info[i].hba = hba;
+ host->mcq_intr_info[i].irq = irq;
+ dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
+ }
+
+ return;
+failed:
+ /* invalidate irq info */
+ for (i = 0; i < host->mcq_nr_intr; i++)
+ host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
+
+ host->mcq_nr_intr = 0;
+}
+
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@@ -847,7 +885,7 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
* Binds PHY with controller and powers up PHY enabling clocks
* and regulators.
*
- * Returns -EPROBE_DEFER if binding fails, returns negative error
+ * Return: -EPROBE_DEFER if binding fails, returns negative error
* on phy power up failure and returns zero on success.
*/
static int ufs_mtk_init(struct ufs_hba *hba)
@@ -876,6 +914,8 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Initialize host capability */
ufs_mtk_init_host_caps(hba);
+ ufs_mtk_init_mcq_irq(hba);
+
err = ufs_mtk_bind_mphy(hba);
if (err)
goto out_variant_clear;
@@ -1173,7 +1213,17 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
else
return err;
- err = ufshcd_make_hba_operational(hba);
+ if (!hba->mcq_enabled) {
+ err = ufshcd_make_hba_operational(hba);
+ } else {
+ ufs_mtk_config_mcq(hba, false);
+ ufshcd_mcq_make_queues_operational(hba);
+ ufshcd_mcq_config_mac(hba, hba->nutrs);
+ /* Enable MCQ mode */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
+ REG_UFS_MEM_CFG);
+ }
+
if (err)
return err;
@@ -1497,6 +1547,121 @@ static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
return 0;
}
+static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
+{
+ return MAX_SUPP_MAC;
+}
+
+static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
+{
+ struct ufshcd_mcq_opr_info_t *opr;
+ int i;
+
+ hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
+ hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
+ hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
+ hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
+
+ for (i = 0; i < OPR_MAX; i++) {
+ opr = &hba->mcq_opr[i];
+ opr->stride = REG_UFS_MCQ_STRIDE;
+ opr->base = hba->mmio_base + opr->offset;
+ }
+
+ return 0;
+}
+
+static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ /* fail mcq initialization if interrupt is not filled properly */
+ if (!host->mcq_nr_intr) {
+ dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
+ return -EINVAL;
+ }
+
+ hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
+ return 0;
+}
+
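For reference, the MCQ_QUEUE_OFFSET() macro used above derives the MCQ register base from bits 23:16 of the controller's MCQ capability value, scaled by 0x200 bytes. A minimal sketch of that calculation, assuming a hypothetical capability value of 0x00020000 (the helper name is illustrative only):

/* Sketch: mirrors MCQ_QUEUE_OFFSET(c) = (((c) >> 16) & 0xFF) * 0x200 */
static void __iomem *mcq_base_from_caps(void __iomem *mmio_base, u32 mcq_capabilities)
{
	u32 qcfgptr = (mcq_capabilities >> 16) & 0xFF;	/* 0x00020000 -> 2 */

	return mmio_base + qcfgptr * 0x200;		/* -> mmio_base + 0x400 */
}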
+static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
+{
+ struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
+ struct ufs_hba *hba = mcq_intr_info->hba;
+ struct ufs_hw_queue *hwq;
+ u32 events;
+ int qid = mcq_intr_info->qid;
+
+ hwq = &hba->uhq[qid];
+
+ events = ufshcd_mcq_read_cqis(hba, qid);
+ if (events)
+ ufshcd_mcq_write_cqis(hba, events, qid);
+
+ if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+
+ return IRQ_HANDLED;
+}
+
+static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 irq, i;
+ int ret;
+
+ for (i = 0; i < host->mcq_nr_intr; i++) {
+ irq = host->mcq_intr_info[i].irq;
+ if (irq == MTK_MCQ_INVALID_IRQ) {
+ dev_err(hba->dev, "invalid irq. %d\n", i);
+ return -ENOPARAM;
+ }
+
+ host->mcq_intr_info[i].qid = i;
+ ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
+ &host->mcq_intr_info[i]);
+
+ dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
+
+ if (ret) {
+ dev_err(hba->dev, "Cannot request irq %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ int ret = 0;
+
+ if (!host->mcq_set_intr) {
+ /* Disable irq option register */
+ ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
+
+ if (irq) {
+ ret = ufs_mtk_config_mcq_irq(hba);
+ if (ret)
+ return ret;
+ }
+
+ host->mcq_set_intr = true;
+ }
+
+ ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
+ ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
+
+ return 0;
+}
+
+static int ufs_mtk_config_esi(struct ufs_hba *hba)
+{
+ return ufs_mtk_config_mcq(hba, true);
+}
+
/*
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
@@ -1520,13 +1685,18 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.event_notify = ufs_mtk_event_notify,
.config_scaling_param = ufs_mtk_config_scaling_param,
.clk_scale_notify = ufs_mtk_clk_scale_notify,
+ /* mcq vops */
+ .get_hba_mac = ufs_mtk_get_hba_mac,
+ .op_runtime_config = ufs_mtk_op_runtime_config,
+ .mcq_config_resource = ufs_mtk_mcq_config_resource,
+ .config_esi = ufs_mtk_config_esi,
};
/**
* ufs_mtk_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int ufs_mtk_probe(struct platform_device *pdev)
{
@@ -1566,7 +1736,7 @@ skip_reset:
out:
if (err)
- dev_info(dev, "probe failed %d\n", err);
+ dev_err(dev, "probe failed %d\n", err);
of_node_put(reset_node);
return err;
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 2fc6d7b87694..f76e80d91729 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -11,10 +11,26 @@
#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
+ * MCQ define and struct
+ */
+#define UFSHCD_MAX_Q_NR 8
+#define MTK_MCQ_INVALID_IRQ 0xFFFF
+
+/* REG_UFS_MMIO_OPT_CTRL_0 160h */
+#define EHS_EN BIT(0)
+#define PFM_IMPV BIT(1)
+#define MCQ_MULTI_INTR_EN BIT(2)
+#define MCQ_CMB_INTR_EN BIT(3)
+#define MCQ_AH8 BIT(4)
+
+#define MCQ_INTR_EN_MSK (MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN)
+
+/*
* Vendor specific UFSHCI Registers
*/
#define REG_UFS_XOUFS_CTRL 0x140
#define REG_UFS_REFCLK_CTRL 0x144
+#define REG_UFS_MMIO_OPT_CTRL_0 0x160
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
#define REG_UFS_MTK_IP_VER 0x2240
@@ -26,6 +42,13 @@
#define REG_UFS_DEBUG_SEL_B2 0x22D8
#define REG_UFS_DEBUG_SEL_B3 0x22DC
+#define REG_UFS_MTK_SQD 0x2800
+#define REG_UFS_MTK_SQIS 0x2814
+#define REG_UFS_MTK_CQD 0x281C
+#define REG_UFS_MTK_CQIS 0x2824
+
+#define REG_UFS_MCQ_STRIDE 0x30
+
/*
* Ref-clk control
*
@@ -136,6 +159,12 @@ struct ufs_mtk_hw_ver {
u8 major;
};
+struct ufs_mtk_mcq_intr_info {
+ struct ufs_hba *hba;
+ u32 irq;
+ u8 qid;
+};
+
struct ufs_mtk_host {
struct phy *mphy;
struct pm_qos_request pm_qos_req;
@@ -155,6 +184,10 @@ struct ufs_mtk_host {
u16 ref_clk_ungating_wait_us;
u16 ref_clk_gating_wait_us;
u32 ip_ver;
+
+ bool mcq_set_intr;
+ int mcq_nr_intr;
+ struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
};
/*
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index c1557d21b027..d1149b1c3ed5 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -7,6 +7,7 @@
#include <linux/time.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -46,6 +47,49 @@ enum {
TSTBUS_MAX,
};
+#define QCOM_UFS_MAX_GEAR 4
+#define QCOM_UFS_MAX_LANE 2
+
+enum {
+ MODE_MIN,
+ MODE_PWM,
+ MODE_HS_RA,
+ MODE_HS_RB,
+ MODE_MAX,
+};
+
+static const struct __ufs_qcom_bw_table {
+ u32 mem_bw;
+ u32 cfg_bw;
+} ufs_qcom_bw_table[MODE_MAX + 1][QCOM_UFS_MAX_GEAR + 1][QCOM_UFS_MAX_LANE + 1] = {
+ [MODE_MIN][0][0] = { 0, 0 }, /* Bandwidth values in KB/s */
+ [MODE_PWM][UFS_PWM_G1][UFS_LANE_1] = { 922, 1000 },
+ [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
+ [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
+ [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
+ [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
+ [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
+ [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
+ [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
+ [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
+ [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
+ [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
+ [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
+ [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
+ [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
+ [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
+ [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
+ [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
+ [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
+ [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_MAX][0][0] = { 7643136, 307200 },
+};
+
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
@@ -485,7 +529,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
}
/*
- * Returns zero for success and non-zero in case of a failure
+ * Return: zero for success and non-zero in case of a failure.
*/
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer)
@@ -789,6 +833,51 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
}
}
+static int ufs_qcom_icc_set_bw(struct ufs_qcom_host *host, u32 mem_bw, u32 cfg_bw)
+{
+ struct device *dev = host->hba->dev;
+ int ret;
+
+ ret = icc_set_bw(host->icc_ddr, 0, mem_bw);
+ if (ret < 0) {
+ dev_err(dev, "failed to set bandwidth request: %d\n", ret);
+ return ret;
+ }
+
+ ret = icc_set_bw(host->icc_cpu, 0, cfg_bw);
+ if (ret < 0) {
+ dev_err(dev, "failed to set bandwidth request: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host)
+{
+ struct ufs_pa_layer_attr *p = &host->dev_req_params;
+ int gear = max_t(u32, p->gear_rx, p->gear_tx);
+ int lane = max_t(u32, p->lane_rx, p->lane_tx);
+
+ if (ufshcd_is_hs_mode(p)) {
+ if (p->hs_rate == PA_HS_MODE_B)
+ return ufs_qcom_bw_table[MODE_HS_RB][gear][lane];
+ else
+ return ufs_qcom_bw_table[MODE_HS_RA][gear][lane];
+ } else {
+ return ufs_qcom_bw_table[MODE_PWM][gear][lane];
+ }
+}
+
+static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
+{
+ struct __ufs_qcom_bw_table bw_table;
+
+ bw_table = ufs_qcom_get_bw_table(host);
+
+ return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
+}
+
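A minimal sketch of how one vote from ufs_qcom_bw_table above is applied, assuming HS rate-B, gear 4, two lanes; host is the ufs_qcom_host used in the surrounding functions, and icc_set_bw() takes average and peak bandwidth in KB/s:

	const struct __ufs_qcom_bw_table *t =
		&ufs_qcom_bw_table[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2];

	/* t->mem_bw == 2915200 KB/s, t->cfg_bw == 409600 KB/s */
	icc_set_bw(host->icc_ddr, 0, t->mem_bw);	/* UFS <-> DDR path */
	icc_set_bw(host->icc_cpu, 0, t->cfg_bw);	/* CPU <-> UFS config path */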
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
@@ -852,6 +941,8 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
+ ufs_qcom_icc_update_bw(host);
+
/* disable the device ref clock if entered PWM mode */
if (ufshcd_is_hs_mode(&hba->pwr_info) &&
!ufshcd_is_hs_mode(dev_req_params))
@@ -964,7 +1055,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
- * Returns 0 on success, non-zero on failure.
+ * Return: 0 on success, non-zero on failure.
*/
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
@@ -981,7 +1072,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
switch (status) {
case PRE_CHANGE:
- if (!on) {
+ if (on) {
+ ufs_qcom_icc_update_bw(host);
+ } else {
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
@@ -993,6 +1086,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
+ } else {
+ ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
+ ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
}
break;
}
@@ -1031,6 +1127,34 @@ static const struct reset_control_ops ufs_qcom_reset_ops = {
.deassert = ufs_qcom_reset_deassert,
};
+static int ufs_qcom_icc_init(struct ufs_qcom_host *host)
+{
+ struct device *dev = host->hba->dev;
+ int ret;
+
+ host->icc_ddr = devm_of_icc_get(dev, "ufs-ddr");
+ if (IS_ERR(host->icc_ddr))
+ return dev_err_probe(dev, PTR_ERR(host->icc_ddr),
+ "failed to acquire interconnect path\n");
+
+ host->icc_cpu = devm_of_icc_get(dev, "cpu-ufs");
+ if (IS_ERR(host->icc_cpu))
+ return dev_err_probe(dev, PTR_ERR(host->icc_cpu),
+ "failed to acquire interconnect path\n");
+
+ /*
+ * Set Maximum bandwidth vote before initializing the UFS controller and
+ * device. Ideally, a minimal interconnect vote would suffice for the
+ * initialization, but a max vote would allow faster initialization.
+ */
+ ret = ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw,
+ ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to set bandwidth request\n");
+
+ return 0;
+}
+
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
@@ -1038,7 +1162,7 @@ static const struct reset_control_ops ufs_qcom_reset_ops = {
* Binds PHY with controller and powers up PHY enabling clocks
* and regulators.
*
- * Returns -EPROBE_DEFER if binding fails, returns negative error
+ * Return: -EPROBE_DEFER if binding fails, returns negative error
* on phy power up failure and returns zero on success.
*/
static int ufs_qcom_init(struct ufs_hba *hba)
@@ -1085,6 +1209,10 @@ static int ufs_qcom_init(struct ufs_hba *hba)
}
}
+ err = ufs_qcom_icc_init(host);
+ if (err)
+ goto out_variant_clear;
+
host->device_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(host->device_reset)) {
@@ -1254,6 +1382,10 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
int err = 0;
+ /* check the host controller state before sending hibern8 cmd */
+ if (!ufshcd_is_hba_active(hba))
+ return 0;
+
if (status == PRE_CHANGE) {
err = ufshcd_uic_hibern8_enter(hba);
if (err)
@@ -1282,6 +1414,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
dev_req_params->pwr_rx,
dev_req_params->hs_rate,
false);
+ ufs_qcom_icc_update_bw(host);
ufshcd_uic_hibern8_exit(hba);
}
@@ -1483,6 +1616,7 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
struct devfreq_simple_ondemand_data *d)
{
p->polling_ms = 60;
+ p->timer = DEVFREQ_TIMER_DELAYED;
d->upthreshold = 70;
d->downdifferential = 5;
}
@@ -1643,11 +1777,12 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
ufshcd_mcq_config_esi(hba, msg);
}
-static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
+static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
- struct ufs_hba *hba = __hba;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- u32 id = irq - host->esi_base;
+ struct msi_desc *desc = data;
+ struct device *dev = msi_desc_to_dev(desc);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 id = desc->msi_index;
struct ufs_hw_queue *hwq = &hba->uhq[id];
ufshcd_mcq_write_cqis(hba, 0x1, id);
@@ -1665,8 +1800,6 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
if (host->esi_enabled)
return 0;
- else if (host->esi_base < 0)
- return -EINVAL;
/*
* 1. We only handle CQs as of now.
@@ -1675,16 +1808,16 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
- if (ret)
+ if (ret) {
+ dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
goto out;
+ }
+ msi_lock_descs(hba->dev);
msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- if (!desc->msi_index)
- host->esi_base = desc->irq;
-
ret = devm_request_irq(hba->dev, desc->irq,
ufs_qcom_mcq_esi_handler,
- IRQF_SHARED, "qcom-mcq-esi", hba);
+ IRQF_SHARED, "qcom-mcq-esi", desc);
if (ret) {
dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
__func__, desc->irq, ret);
@@ -1692,14 +1825,17 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
break;
}
}
+ msi_unlock_descs(hba->dev);
if (ret) {
/* Rewind */
+ msi_lock_descs(hba->dev);
msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
if (desc == failed_desc)
break;
devm_free_irq(hba->dev, desc->irq, hba);
}
+ msi_unlock_descs(hba->dev);
platform_msi_domain_free_irqs(hba->dev);
} else {
if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
@@ -1712,12 +1848,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
}
out:
- if (ret) {
- host->esi_base = -1;
- dev_warn(hba->dev, "Failed to request Platform MSI %d\n", ret);
- } else {
+ if (!ret)
host->esi_enabled = true;
- }
return ret;
}
@@ -1757,7 +1889,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
* ufs_qcom_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int ufs_qcom_probe(struct platform_device *pdev)
{
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 6289ad5a42d0..d6f8e74bd538 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -206,6 +206,9 @@ struct ufs_qcom_host {
struct clk *tx_l1_sync_clk;
bool is_lane_clks_enabled;
+ struct icc_path *icc_ddr;
+ struct icc_path *icc_cpu;
+
#ifdef CONFIG_SCSI_UFS_CRYPTO
struct qcom_ice *ice;
#endif
@@ -226,7 +229,6 @@ struct ufs_qcom_host {
u32 hs_gear;
- int esi_base;
bool esi_enabled;
};
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
index ab0652d8705a..cc94970b86c9 100644
--- a/drivers/ufs/host/ufs-renesas.c
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <ufs/ufshcd.h>
diff --git a/drivers/ufs/host/ufshcd-dwc.c b/drivers/ufs/host/ufshcd-dwc.c
index e28a67e1e314..21b1cf912dcc 100644
--- a/drivers/ufs/host/ufshcd-dwc.c
+++ b/drivers/ufs/host/ufshcd-dwc.c
@@ -34,9 +34,7 @@ int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs);
/**
- * ufshcd_dwc_program_clk_div()
- * This function programs the clk divider value. This value is needed to
- * provide 1 microsecond tick to unipro layer.
+ * ufshcd_dwc_program_clk_div() - program clock divider.
* @hba: Private Structure pointer
* @divider_val: clock divider value to be programmed
*
@@ -47,11 +45,10 @@ static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
}
/**
- * ufshcd_dwc_link_is_up()
- * Check if link is up
+ * ufshcd_dwc_link_is_up() - check if link is up.
* @hba: private structure pointer
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
{
@@ -68,7 +65,9 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
}
/**
- * ufshcd_dwc_connection_setup()
+ * ufshcd_dwc_connection_setup() - configure unipro attributes.
+ * @hba: pointer to drivers private data
+ *
* This function configures both the local side (host) and the peer side
* (device) unipro attributes to establish the connection to application/
* cport.
@@ -76,9 +75,7 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
* have this connection setup on reset. But invoking this function does no
* harm and should be fine even working with any ufs device.
*
- * @hba: pointer to drivers private data
- *
- * Returns 0 on success non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
{
@@ -107,12 +104,11 @@ static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
}
/**
- * ufshcd_dwc_link_startup_notify()
- * UFS Host DWC specific link startup sequence
+ * ufshcd_dwc_link_startup_notify() - UFS Host DWC specific link startup sequence.
* @hba: private structure pointer
* @status: Callback notify status
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index cf3987773051..248a49e5e7f3 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -524,7 +524,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
* @pdev: pointer to PCI device handle
* @id: PCI device id
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -590,6 +590,7 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
+ { PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 0b7430033047..797a4dfe45d9 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -166,6 +166,8 @@ EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
* If any of the supplies are not defined it is assumed that they are always-on
* and hence return zero. If the property is defined but parsing fails,
* then return the corresponding error.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
{
@@ -212,7 +214,7 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
* @dev_max: pointer to device attributes
* @agreed_pwr: returned agreed attributes
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
const struct ufs_pa_layer_attr *dev_max,
@@ -305,8 +307,8 @@ EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
{
*dev_param = (struct ufs_dev_params){
- .tx_lanes = 2,
- .rx_lanes = 2,
+ .tx_lanes = UFS_LANE_2,
+ .rx_lanes = UFS_LANE_2,
.hs_rx_gear = UFS_HS_G3,
.hs_tx_gear = UFS_HS_G3,
.pwm_rx_gear = UFS_PWM_G4,
@@ -326,7 +328,7 @@ EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
* @pdev: pointer to Platform device handle
* @vops: pointer to variant ops
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops)
@@ -373,7 +375,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
- dev_err(dev, "Initialization failed\n");
+ dev_err_probe(dev, err, "Initialization failed with error %d\n",
+ err);
goto dealloc_host;
}
diff --git a/drivers/usb/dwc3/dwc3-octeon.c b/drivers/usb/dwc3/dwc3-octeon.c
index ff01f2c17452..6010135e1acc 100644
--- a/drivers/usb/dwc3/dwc3-octeon.c
+++ b/drivers/usb/dwc3/dwc3-octeon.c
@@ -13,7 +13,9 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
/*
* USB Control Register
diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c
index 0c7bf88d4a7f..f67733cecfdf 100644
--- a/drivers/usb/typec/ucsi/debugfs.c
+++ b/drivers/usb/typec/ucsi/debugfs.c
@@ -84,6 +84,9 @@ void ucsi_debugfs_register(struct ucsi *ucsi)
void ucsi_debugfs_unregister(struct ucsi *ucsi)
{
+ if (IS_ERR_OR_NULL(ucsi) || !ucsi->debugfs)
+ return;
+
debugfs_remove_recursive(ucsi->debugfs->dentry);
kfree(ucsi->debugfs);
}
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index b53420e874ac..ca56242972b3 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -100,9 +100,6 @@ struct mlx5_vdpa_dev {
bool suspended;
};
-int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
-int mlx5_vdpa_dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid);
-int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index d343af4fa60e..76d41058add9 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -18,6 +18,7 @@
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>
+#include <uapi/linux/vhost_types.h>
#include "vdpa_sim.h"
@@ -410,6 +411,11 @@ static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
return vdpasim->dev_attr.supported_features;
}
+static u64 vdpasim_get_backend_features(const struct vdpa_device *vdpa)
+{
+ return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
+}
+
static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -733,6 +739,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_vq_align = vdpasim_get_vq_align,
.get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
+ .get_backend_features = vdpasim_get_backend_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
@@ -770,6 +777,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_vq_align = vdpasim_get_vq_align,
.get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
+ .get_backend_features = vdpasim_get_backend_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index e4490639d383..9d2738e10c0b 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -233,7 +233,8 @@ int parent_create_sysfs_files(struct mdev_parent *parent)
out_err:
while (--i >= 0)
mdev_type_remove(parent->types[i]);
- return 0;
+ kset_unregister(parent->mdev_types_kset);
+ return ret;
}
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/vfio/pci/pds/Kconfig b/drivers/vfio/pci/pds/Kconfig
index 407b3fd32733..6eceef7b028a 100644
--- a/drivers/vfio/pci/pds/Kconfig
+++ b/drivers/vfio/pci/pds/Kconfig
@@ -3,7 +3,7 @@
config PDS_VFIO_PCI
tristate "VFIO support for PDS PCI devices"
- depends on PDS_CORE
+ depends on PDS_CORE && PCI_IOV
select VFIO_PCI_CORE
help
This provides generic PCI support for PDS devices using the VFIO
diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
index b46174f5eb09..649b18ee394b 100644
--- a/drivers/vfio/pci/pds/vfio_dev.c
+++ b/drivers/vfio/pci/pds/vfio_dev.c
@@ -162,7 +162,7 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
pci_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
dev_dbg(&pdev->dev,
"%s: PF %#04x VF %#04x vf_id %d domain %d pds_vfio %p\n",
- __func__, pci_dev_id(pdev->physfn), pci_id, vf_id,
+ __func__, pci_dev_id(pci_physfn(pdev)), pci_id, vf_id,
pci_domain_nr(pdev->bus), pds_vfio);
return 0;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index b43e8680eee8..78379ffd2336 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -403,6 +403,17 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
return 0;
}
+static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!ops->get_backend_features)
+ return 0;
+ else
+ return ops->get_backend_features(vdpa);
+}
+
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -680,7 +691,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return -EFAULT;
if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
- BIT_ULL(VHOST_BACKEND_F_RESUME)))
+ BIT_ULL(VHOST_BACKEND_F_RESUME) |
+ BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
return -EOPNOTSUPP;
if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
!vhost_vdpa_can_suspend(v))
@@ -741,6 +753,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (vhost_vdpa_can_resume(v))
features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
+ features |= vhost_vdpa_get_backend_features(v);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 955d938eb663..7b8fd977f71c 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -123,8 +123,18 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
done += partlen;
len -= partlen;
ptr += partlen;
+ iov->consumed += partlen;
+ iov->iov[iov->i].iov_len -= partlen;
+ iov->iov[iov->i].iov_base += partlen;
- vringh_kiov_advance(iov, partlen);
+ if (!iov->iov[iov->i].iov_len) {
+ /* Fix up old iov element then increment. */
+ iov->iov[iov->i].iov_len = iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
}
return done;
}
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index d3bea42407f1..d28c30b2a35d 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -87,8 +87,7 @@ static int gpio_backlight_probe(struct platform_device *pdev)
/* Not booted with device tree or no phandle link to the node */
bl->props.power = def_value ? FB_BLANK_UNBLANK
: FB_BLANK_POWERDOWN;
- else if (gpiod_get_direction(gbl->gpiod) == 0 &&
- gpiod_get_value_cansleep(gbl->gpiod) == 0)
+ else if (gpiod_get_value_cansleep(gbl->gpiod) == 0)
bl->props.power = FB_BLANK_POWERDOWN;
else
bl->props.power = FB_BLANK_UNBLANK;
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
index 3259292fda76..032f8bddf872 100644
--- a/drivers/video/backlight/led_bl.c
+++ b/drivers/video/backlight/led_bl.c
@@ -243,7 +243,7 @@ MODULE_DEVICE_TABLE(of, led_bl_of_match);
static struct platform_driver led_bl_driver = {
.driver = {
.name = "led-backlight",
- .of_match_table = of_match_ptr(led_bl_of_match),
+ .of_match_table = led_bl_of_match,
},
.probe = led_bl_probe,
.remove_new = led_bl_remove,
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 1c9e921bca14..da1f124db69c 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -71,6 +71,7 @@ struct lp855x {
struct device *dev;
struct lp855x_platform_data *pdata;
struct pwm_device *pwm;
+ bool needs_pwm_init;
struct regulator *supply; /* regulator for VDD input */
struct regulator *enable; /* regulator for EN/VDDIO input */
};
@@ -216,16 +217,24 @@ err:
return ret;
}
-static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
+static int lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
{
struct pwm_state state;
- pwm_get_state(lp->pwm, &state);
+ if (lp->needs_pwm_init) {
+ pwm_init_state(lp->pwm, &state);
+ /* Legacy platform data compatibility */
+ if (lp->pdata->period_ns > 0)
+ state.period = lp->pdata->period_ns;
+ lp->needs_pwm_init = false;
+ } else {
+ pwm_get_state(lp->pwm, &state);
+ }
state.duty_cycle = div_u64(br * state.period, max_br);
state.enabled = state.duty_cycle;
- pwm_apply_state(lp->pwm, &state);
+ return pwm_apply_state(lp->pwm, &state);
}
static int lp855x_bl_update_status(struct backlight_device *bl)
@@ -237,11 +246,12 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
brightness = 0;
if (lp->mode == PWM_BASED)
- lp855x_pwm_ctrl(lp, brightness, bl->props.max_brightness);
+ return lp855x_pwm_ctrl(lp, brightness,
+ bl->props.max_brightness);
else if (lp->mode == REGISTER_BASED)
- lp855x_write_byte(lp, lp->cfg->reg_brightness, (u8)brightness);
-
- return 0;
+ return lp855x_write_byte(lp, lp->cfg->reg_brightness,
+ (u8)brightness);
+ return -EINVAL;
}
static const struct backlight_ops lp855x_bl_ops = {
@@ -387,7 +397,6 @@ static int lp855x_probe(struct i2c_client *cl)
const struct i2c_device_id *id = i2c_client_get_device_id(cl);
const struct acpi_device_id *acpi_id = NULL;
struct device *dev = &cl->dev;
- struct pwm_state pwmstate;
struct lp855x *lp;
int ret;
@@ -470,15 +479,11 @@ static int lp855x_probe(struct i2c_client *cl)
else
return dev_err_probe(dev, ret, "getting PWM\n");
+ lp->needs_pwm_init = false;
lp->mode = REGISTER_BASED;
dev_dbg(dev, "mode: register based\n");
} else {
- pwm_init_state(lp->pwm, &pwmstate);
- /* Legacy platform data compatibility */
- if (lp->pdata->period_ns > 0)
- pwmstate.period = lp->pdata->period_ns;
- pwm_apply_state(lp->pwm, &pwmstate);
-
+ lp->needs_pwm_init = true;
lp->mode = PWM_BASED;
dev_dbg(dev, "mode: PWM based\n");
}
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index c6996aa288e6..10129095a4c1 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -9,8 +9,8 @@
#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
/* From DT binding */
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 1b5a319971ed..30577b1d3de5 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -73,6 +73,7 @@ config DUMMY_CONSOLE_ROWS
config FRAMEBUFFER_CONSOLE
bool "Framebuffer Console support"
depends on FB_CORE && !UML
+ default DRM_FBDEV_EMULATION
select VT_HW_CONSOLE_BINDING
select CRC32
select FONT_SUPPORT
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index eac0ba39581e..c29754b65c0e 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1762,7 +1762,7 @@ config FB_COBALT
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
- depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
select FB_IOMEM_HELPERS
help
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index baf7e852c75b..5ac1b0637531 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -28,7 +28,7 @@ config FIRMWARE_EDID
config FB_DEVICE
bool "Provide legacy /dev/fb* device"
depends on FB_CORE
- default y
+ default FB
help
Say Y here if you want the legacy /dev/fb* device file and
interfaces within sysfs and procfs. It is only required if you
diff --git a/drivers/video/fbdev/g364fb.c b/drivers/video/fbdev/g364fb.c
index 7a1013b22fa7..ee6fe51e0a6b 100644
--- a/drivers/video/fbdev/g364fb.c
+++ b/drivers/video/fbdev/g364fb.c
@@ -112,7 +112,7 @@ static int g364fb_blank(int blank, struct fb_info *info);
static const struct fb_ops g364fb_ops = {
.owner = THIS_MODULE,
- FB_DEFAULT_IOMEM_HELPERS,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = g364fb_setcolreg,
.fb_pan_display = g364fb_pan_display,
.fb_blank = g364fb_blank,
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c5310eaf8b46..51d8f3299c10 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -172,6 +172,14 @@ struct vring_virtqueue {
/* Host publishes avail event idx */
bool event;
+ /* DMA mapping is done by the driver (premapped mode). */
+ bool premapped;
+
+ /* Whether to unmap descriptors here: true only when premapped is
+ * false and use_dma_api is true.
+ */
+ bool do_unmap;
+
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */
@@ -355,10 +363,14 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
}
/* Map one sg entry. */
-static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
- struct scatterlist *sg,
- enum dma_data_direction direction)
+static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
+ enum dma_data_direction direction, dma_addr_t *addr)
{
+ if (vq->premapped) {
+ *addr = sg_dma_address(sg);
+ return 0;
+ }
+
if (!vq->use_dma_api) {
/*
* If DMA is not used, KMSAN doesn't know that the scatterlist
@@ -366,7 +378,8 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
* depending on the direction.
*/
kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
- return (dma_addr_t)sg_phys(sg);
+ *addr = (dma_addr_t)sg_phys(sg);
+ return 0;
}
/*
@@ -374,9 +387,14 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
* the way it expects (we don't guarantee that the scatterlist
* will exist for the lifetime of the mapping).
*/
- return dma_map_page(vring_dma_dev(vq),
+ *addr = dma_map_page(vring_dma_dev(vq),
sg_page(sg), sg->offset, sg->length,
direction);
+
+ if (dma_mapping_error(vring_dma_dev(vq), *addr))
+ return -ENOMEM;
+
+ return 0;
}
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
@@ -427,7 +445,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
{
u16 flags;
- if (!vq->use_dma_api)
+ if (!vq->do_unmap)
return;
flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
@@ -445,18 +463,21 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
struct vring_desc_extra *extra = vq->split.desc_extra;
u16 flags;
- if (!vq->use_dma_api)
- goto out;
-
flags = extra[i].flags;
if (flags & VRING_DESC_F_INDIRECT) {
+ if (!vq->use_dma_api)
+ goto out;
+
dma_unmap_single(vring_dma_dev(vq),
extra[i].addr,
extra[i].len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
+ if (!vq->do_unmap)
+ goto out;
+
dma_unmap_page(vring_dma_dev(vq),
extra[i].addr,
extra[i].len,
@@ -588,8 +609,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
for (n = 0; n < out_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr))
+ dma_addr_t addr;
+
+ if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
goto unmap_release;
prev = i;
@@ -603,8 +625,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
+ dma_addr_t addr;
+
+ if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
goto unmap_release;
prev = i;
@@ -620,7 +643,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
}
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
- if (!indirect && vq->use_dma_api)
+ if (!indirect && vq->do_unmap)
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
~VRING_DESC_F_NEXT;
@@ -629,8 +652,12 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
dma_addr_t addr = vring_map_single(
vq, desc, total_sg * sizeof(struct vring_desc),
DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr))
+ if (vring_mapping_error(vq, addr)) {
+ if (vq->premapped)
+ goto free_indirect;
+
goto unmap_release;
+ }
virtqueue_add_desc_split(_vq, vq->split.vring.desc,
head, addr,
@@ -696,6 +723,7 @@ unmap_release:
i = vring_unmap_one_split(vq, i);
}
+free_indirect:
if (indirect)
kfree(desc);
@@ -774,8 +802,10 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
VRING_DESC_F_INDIRECT));
BUG_ON(len == 0 || len % sizeof(struct vring_desc));
- for (j = 0; j < len / sizeof(struct vring_desc); j++)
- vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+ if (vq->do_unmap) {
+ for (j = 0; j < len / sizeof(struct vring_desc); j++)
+ vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+ }
kfree(indir_desc);
vq->split.desc_state[head].indir_desc = NULL;
@@ -1195,17 +1225,20 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
{
u16 flags;
- if (!vq->use_dma_api)
- return;
-
flags = extra->flags;
if (flags & VRING_DESC_F_INDIRECT) {
+ if (!vq->use_dma_api)
+ return;
+
dma_unmap_single(vring_dma_dev(vq),
extra->addr, extra->len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
+ if (!vq->do_unmap)
+ return;
+
dma_unmap_page(vring_dma_dev(vq),
extra->addr, extra->len,
(flags & VRING_DESC_F_WRITE) ?
@@ -1218,7 +1251,7 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
{
u16 flags;
- if (!vq->use_dma_api)
+ if (!vq->do_unmap)
return;
flags = le16_to_cpu(desc->flags);
@@ -1279,9 +1312,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- addr = vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
+ if (vring_map_one_sg(vq, sg, n < out_sgs ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
goto unmap_release;
desc[i].flags = cpu_to_le16(n < out_sgs ?
@@ -1296,15 +1328,19 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
addr = vring_map_single(vq, desc,
total_sg * sizeof(struct vring_packed_desc),
DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr))
+ if (vring_mapping_error(vq, addr)) {
+ if (vq->premapped)
+ goto free_desc;
+
goto unmap_release;
+ }
vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
sizeof(struct vring_packed_desc));
vq->packed.vring.desc[head].id = cpu_to_le16(id);
- if (vq->use_dma_api) {
+ if (vq->do_unmap) {
vq->packed.desc_extra[id].addr = addr;
vq->packed.desc_extra[id].len = total_sg *
sizeof(struct vring_packed_desc);
@@ -1355,6 +1391,7 @@ unmap_release:
for (i = 0; i < err_idx; i++)
vring_unmap_desc_packed(vq, &desc[i]);
+free_desc:
kfree(desc);
END_USE(vq);
@@ -1426,9 +1463,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
c = 0;
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
+ dma_addr_t addr;
+
+ if (vring_map_one_sg(vq, sg, n < out_sgs ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
goto unmap_release;
flags = cpu_to_le16(vq->packed.avail_used_flags |
@@ -1443,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
desc[i].len = cpu_to_le32(sg->length);
desc[i].id = cpu_to_le16(id);
- if (unlikely(vq->use_dma_api)) {
+ if (unlikely(vq->do_unmap)) {
vq->packed.desc_extra[curr].addr = addr;
vq->packed.desc_extra[curr].len = sg->length;
vq->packed.desc_extra[curr].flags =
@@ -1461,7 +1499,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
}
}
- if (i < head)
+ if (i <= head)
vq->packed.avail_wrap_counter ^= 1;
/* We're using some buffers from the free list. */
@@ -1577,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
vq->free_head = id;
vq->vq.num_free += state->num;
- if (unlikely(vq->use_dma_api)) {
+ if (unlikely(vq->do_unmap)) {
curr = id;
for (i = 0; i < state->num; i++) {
vring_unmap_extra_packed(vq,
@@ -1594,7 +1632,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
if (!desc)
return;
- if (vq->use_dma_api) {
+ if (vq->do_unmap) {
len = vq->packed.desc_extra[id].len;
for (i = 0; i < len / sizeof(struct vring_packed_desc);
i++)
@@ -2052,6 +2090,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->packed_ring = true;
vq->dma_dev = dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->premapped = false;
+ vq->do_unmap = vq->use_dma_api;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2112,6 +2152,43 @@ err_ring:
return -ENOMEM;
}
+static int virtqueue_disable_and_recycle(struct virtqueue *_vq,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+ void *buf;
+ int err;
+
+ if (!vq->we_own_ring)
+ return -EPERM;
+
+ if (!vdev->config->disable_vq_and_reset)
+ return -ENOENT;
+
+ if (!vdev->config->enable_vq_after_reset)
+ return -ENOENT;
+
+ err = vdev->config->disable_vq_and_reset(_vq);
+ if (err)
+ return err;
+
+ while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+ recycle(_vq, buf);
+
+ return 0;
+}
+
+static int virtqueue_enable_after_reset(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+
+ if (vdev->config->enable_vq_after_reset(_vq))
+ return -EBUSY;
+
+ return 0;
+}
/*
* Generic functions and exported symbols.
@@ -2238,6 +2315,23 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
+ * virtqueue_dma_dev - get the dma dev
+ * @_vq: the struct virtqueue we're talking about.
+ *
+ * Return: the DMA dev, which can be used for the DMA API.
+ */
+struct device *virtqueue_dma_dev(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (vq->use_dma_api)
+ return vring_dma_dev(vq);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_dev);
+
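As a usage sketch (hypothetical driver-side names vq, buf and len), a driver that manages its own mappings can fetch the vring's DMA device and fall back to the physical address when the vring does not use the DMA API:

	struct device *dma_dev = virtqueue_dma_dev(vq);
	dma_addr_t addr;

	if (dma_dev)
		addr = dma_map_single(dma_dev, buf, len, DMA_FROM_DEVICE);
	else
		addr = virt_to_phys(buf);	/* vring bypasses the DMA API */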
+/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
* @_vq: the struct virtqueue
*
@@ -2541,6 +2635,8 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
#endif
vq->dma_dev = dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->premapped = false;
+ vq->do_unmap = vq->use_dma_api;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2619,7 +2715,7 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
* virtqueue_resize - resize the vring of vq
* @_vq: the struct virtqueue we're talking about.
* @num: new ring num
- * @recycle: callback for recycle the useless buffer
+ * @recycle: callback to recycle unused buffers
*
* When it is really necessary to create a new vring, it will set the current vq
* into the reset state. Then call the passed callback to recycle the buffer
@@ -2643,13 +2739,8 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
void (*recycle)(struct virtqueue *vq, void *buf))
{
struct vring_virtqueue *vq = to_vvq(_vq);
- struct virtio_device *vdev = vq->vq.vdev;
- void *buf;
int err;
- if (!vq->we_own_ring)
- return -EPERM;
-
if (num > vq->vq.num_max)
return -E2BIG;
@@ -2659,31 +2750,101 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
return 0;
- if (!vdev->config->disable_vq_and_reset)
- return -ENOENT;
-
- if (!vdev->config->enable_vq_after_reset)
- return -ENOENT;
-
- err = vdev->config->disable_vq_and_reset(_vq);
+ err = virtqueue_disable_and_recycle(_vq, recycle);
if (err)
return err;
- while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
- recycle(_vq, buf);
-
if (vq->packed_ring)
err = virtqueue_resize_packed(_vq, num);
else
err = virtqueue_resize_split(_vq, num);
- if (vdev->config->enable_vq_after_reset(_vq))
- return -EBUSY;
-
- return err;
+ return virtqueue_enable_after_reset(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_resize);
+/**
+ * virtqueue_set_dma_premapped - set the vring premapped mode
+ * @_vq: the struct virtqueue we're talking about.
+ *
+ * Enable the premapped mode of the vq.
+ *
+ * The vring in premapped mode does not do DMA internally, so the driver must
+ * do the DMA mapping in advance and pass the DMA address through the
+ * dma_address field of the scatterlist. When the driver gets a used buffer
+ * back from the vring, it has to unmap the DMA address itself.
+ *
+ * This function must be called immediately after creating the vq, or after vq
+ * reset, and before adding any buffers to it.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -EINVAL: vring does not use the DMA API, so premapped mode cannot be enabled.
+ */
+int virtqueue_set_dma_premapped(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u32 num;
+
+ START_USE(vq);
+
+ num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
+
+ if (num != vq->vq.num_free) {
+ END_USE(vq);
+ return -EINVAL;
+ }
+
+ if (!vq->use_dma_api) {
+ END_USE(vq);
+ return -EINVAL;
+ }
+
+ vq->premapped = true;
+ vq->do_unmap = false;
+
+ END_USE(vq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
+
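A minimal driver-side sketch of the flow documented above, with hypothetical names (my_post_premapped_rx, vq, buf, len): virtqueue_set_dma_premapped() must have succeeded on this vq beforehand, and the driver later unmaps the address when the buffer is returned.

/* Hypothetical helper: post one premapped receive buffer of @len bytes. */
static int my_post_premapped_rx(struct virtqueue *vq, void *buf, size_t len)
{
	struct scatterlist sg;
	dma_addr_t addr;

	addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_FROM_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, addr))
		return -ENOMEM;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = addr;	/* premapped vring reads the DMA address from here */
	sg.length = len;

	return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}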
+/**
+ * virtqueue_reset - detach and recycle all unused buffers
+ * @_vq: the struct virtqueue we're talking about.
+ * @recycle: callback to recycle unused buffers
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -EBUSY: Failed to sync with device, vq may not work properly
+ * -ENOENT: Transport or device not supported
+ * -EPERM: Operation not permitted
+ */
+int virtqueue_reset(struct virtqueue *_vq,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ int err;
+
+ err = virtqueue_disable_and_recycle(_vq, recycle);
+ if (err)
+ return err;
+
+ if (vq->packed_ring)
+ virtqueue_reinit_packed(vq);
+ else
+ virtqueue_reinit_split(vq);
+
+ return virtqueue_enable_after_reset(_vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_reset);
+
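A short usage sketch of virtqueue_reset(), assuming a hypothetical recycle callback that frees buffers still queued at reset time (in premapped mode it would also unmap them):

/* Hypothetical callback: release a buffer that was never used by the device. */
static void my_recycle(struct virtqueue *vq, void *buf)
{
	kfree(buf);
}

static int my_drain_and_restart(struct virtqueue *vq)
{
	/* Detaches unused buffers via my_recycle(), then reinits and re-enables the ring. */
	return virtqueue_reset(vq, my_recycle);
}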
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
@@ -2945,4 +3106,149 @@ const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
+/**
+ * virtqueue_dma_map_single_attrs - map DMA for _vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @ptr: the pointer of the buffer to do dma
+ * @size: the size of the buffer to do dma
+ * @dir: DMA direction
+ * @attrs: DMA Attrs
+ *
+ * The caller calls this to do dma mapping in advance. The DMA address can be
+ * passed to this _vq when it is in pre-mapped mode.
+ *
+ * Return: the DMA address. The caller should check it with virtqueue_dma_mapping_error().
+ */
+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_dma_api)
+ return (dma_addr_t)virt_to_phys(ptr);
+
+ return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
+
+/**
+ * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: the dma address to unmap
+ * @size: the size of the buffer
+ * @dir: DMA direction
+ * @attrs: DMA Attrs
+ *
+ * Unmap the address that is mapped by the virtqueue_dma_map_* APIs.
+ *
+ */
+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_dma_api)
+ return;
+
+ dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);
+
+/**
+ * virtqueue_dma_mapping_error - check dma address
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Return: 0 if the DMA address is valid, any other value if it is not.
+ */
+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_dma_api)
+ return 0;
+
+ return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
+
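And the matching completion side, again with hypothetical names: once virtqueue_get_buf() hands a premapped buffer back, the driver unmaps it with the helper above before reusing or freeing it (addr and len are assumed to be tracked by the driver alongside buf):

	unsigned int used_len;
	void *buf = virtqueue_get_buf(vq, &used_len);

	if (buf)
		virtqueue_dma_unmap_single_attrs(vq, addr, len, DMA_FROM_DEVICE, 0);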
+/**
+ * virtqueue_dma_need_sync - check a dma address needs sync
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Check whether the DMA address mapped by the virtqueue_dma_map_* APIs needs
+ * to be synchronized.
+ *
+ * Return: true if a sync is needed, false otherwise.
+ */
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_dma_api)
+ return false;
+
+ return dma_need_sync(vring_dma_dev(vq), addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
+
+/**
+ * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ *
+ */
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
+ dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct device *dev = vring_dma_dev(vq);
+
+ if (!vq->use_dma_api)
+ return;
+
+ dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
+
+/**
+ * virtqueue_dma_sync_single_range_for_device - dma sync for device
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ */
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
+ dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct device *dev = vring_dma_dev(vq);
+
+ if (!vq->use_dma_api)
+ return;
+
+ dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
+
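A closing sketch for the sync helpers, with hypothetical names: after a premapped receive buffer is returned by the device, sync it for the CPU before reading it, but only when the mapping actually requires a sync:

	if (virtqueue_dma_need_sync(vq, addr))
		virtqueue_dma_sync_single_range_for_cpu(vq, addr, 0, len,
							DMA_FROM_DEVICE);
	/* buffer contents are now coherent from the CPU's point of view */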
MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 961161da5900..06ce6d8c2e00 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -366,11 +366,14 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct irq_affinity default_affd = { 0 };
struct cpumask *masks;
struct vdpa_callback cb;
+ bool has_affinity = desc && ops->set_vq_affinity;
int i, err, queue_idx = 0;
- masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
- if (!masks)
- return -ENOMEM;
+ if (has_affinity) {
+ masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
+ if (!masks)
+ return -ENOMEM;
+ }
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
@@ -386,20 +389,22 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
goto err_setup_vq;
}
- if (ops->set_vq_affinity)
+ if (has_affinity)
ops->set_vq_affinity(vdpa, i, &masks[i]);
}
cb.callback = virtio_vdpa_config_cb;
cb.private = vd_dev;
ops->set_config_cb(vdpa, &cb);
- kfree(masks);
+ if (has_affinity)
+ kfree(masks);
return 0;
err_setup_vq:
virtio_vdpa_del_vqs(vdev);
- kfree(masks);
+ if (has_affinity)
+ kfree(masks);
return err;
}
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index c1de8a92e144..b2d76c1784bd 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -551,7 +551,7 @@ static struct i2c_driver ds2482_driver = {
.driver = {
.name = "ds2482",
},
- .probe_new = ds2482_probe,
+ .probe = ds2482_probe,
.remove = ds2482_remove,
.id_table = ds2482_id,
};
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0cbfb496b9c8..751458959411 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -307,7 +307,7 @@ config XILINX_WATCHDOG
config XILINX_WINDOW_WATCHDOG
tristate "Xilinx window watchdog timer"
depends on HAS_IOMEM
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
select WATCHDOG_CORE
help
Window watchdog driver for the versal_wwdt IP core.
@@ -343,7 +343,7 @@ config RAVE_SP_WATCHDOG
config MLX_WDT
tristate "Mellanox Watchdog"
- depends on MELLANOX_PLATFORM
+ depends on MELLANOX_PLATFORM || COMPILE_TEST
select WATCHDOG_CORE
select REGMAP
help
@@ -493,7 +493,7 @@ config FTWDT010_WATCHDOG
config IXP4XX_WATCHDOG
tristate "IXP4xx Watchdog"
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX || (ARM && COMPILE_TEST)
select WATCHDOG_CORE
help
Say Y here to include support for the watchdog timer
@@ -529,7 +529,7 @@ config S3C2410_WATCHDOG
config SA1100_WATCHDOG
tristate "SA1100/PXA2xx watchdog"
- depends on ARCH_SA1100 || ARCH_PXA
+ depends on ARCH_SA1100 || ARCH_PXA || COMPILE_TEST
help
Watchdog timer embedded into SA11x0 and PXA2xx chips. This will
reboot your system when timeout is reached.
@@ -720,7 +720,7 @@ config IMX2_WDT
config IMX_SC_WDT
tristate "IMX SC Watchdog"
depends on HAVE_ARM_SMCCC
- depends on IMX_SCU
+ depends on IMX_SCU || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the system controller watchdog
@@ -931,7 +931,7 @@ config ASPEED_WATCHDOG
config STM32_WATCHDOG
tristate "STM32 Independent WatchDoG (IWDG) support"
- depends on ARCH_STM32
+ depends on ARCH_STM32 || COMPILE_TEST
select WATCHDOG_CORE
default y
help
@@ -1065,7 +1065,7 @@ config ACQUIRE_WDT
config ADVANTECH_WDT
tristate "Advantech SBC Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
If you are configuring a Linux kernel for the Advantech single-board
computer, say `Y' here to support its built-in watchdog timer
@@ -1074,14 +1074,16 @@ config ADVANTECH_WDT
config ADVANTECH_EC_WDT
tristate "Advantech Embedded Controller Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
+ select ISA_BUS_API
+ select WATCHDOG_CORE
help
This driver supports Advantech products with ITE based Embedded Controller.
It does not support Advantech products with other ECs or without EC.
config ALIM1535_WDT
tristate "ALi M1535 PMU Watchdog Timer"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
help
This is the driver for the hardware watchdog on the ALi M1535 PMU.
@@ -1105,7 +1107,7 @@ config ALIM7101_WDT
config EBC_C384_WDT
tristate "WinSystems EBC-C384 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
select ISA_BUS_API
select WATCHDOG_CORE
help
@@ -1115,7 +1117,7 @@ config EBC_C384_WDT
config EXAR_WDT
tristate "Exar Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
Enables watchdog timer support for the watchdog timer present
@@ -1126,7 +1128,7 @@ config EXAR_WDT
config F71808E_WDT
tristate "Fintek F718xx, F818xx Super I/O Watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog on the Fintek F71808E,
@@ -1138,7 +1140,7 @@ config F71808E_WDT
config SP5100_TCO
tristate "AMD/ATI SP5100 TCO Timer/Watchdog"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select WATCHDOG_CORE
help
Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO
@@ -1177,7 +1179,7 @@ config SC520_WDT
config SBC_FITPC2_WATCHDOG
tristate "Compulab SBC-FITPC2 watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the built-in watchdog timer on the fit-PC2,
fit-PC2i, CM-iAM single-board computers made by Compulab.
@@ -1202,7 +1204,7 @@ config SBC_FITPC2_WATCHDOG
config EUROTECH_WDT
tristate "Eurotech CPU-1220/1410 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
Enable support for the watchdog timer on the Eurotech CPU-1220 and
CPU-1410 cards. These are PC/104 SBCs. Spec sheets and product
@@ -1210,7 +1212,7 @@ config EUROTECH_WDT
config IB700_WDT
tristate "IB700 SBC Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog on the IB700 Single
Board Computer produced by TMC Technology (www.tmc-uk.com). This
@@ -1227,7 +1229,7 @@ config IB700_WDT
config IBMASR
tristate "IBM Automatic Server Restart"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the IBM Automatic Server Restart watchdog
timer built-in into some eServer xSeries machines.
@@ -1237,7 +1239,7 @@ config IBMASR
config WAFER_WDT
tristate "ICP Single Board Computer Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is a driver for the hardware watchdog on the ICP Single
Board Computer. This driver is working on (at least) the following
@@ -1259,7 +1261,7 @@ config I6300ESB_WDT
config IE6XX_WDT
tristate "Intel Atom E6xx Watchdog"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select WATCHDOG_CORE
select MFD_CORE
select LPC_SCH
@@ -1319,7 +1321,7 @@ config ITCO_VENDOR_SUPPORT
config IT8712F_WDT
tristate "IT8712F (Smart Guardian) Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the built-in watchdog timer on the IT8712F
Super I/0 chipset used on many motherboards.
@@ -1332,7 +1334,7 @@ config IT8712F_WDT
config IT87_WDT
tristate "IT87 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog on the ITE IT8607,
@@ -1350,7 +1352,7 @@ config IT87_WDT
config HP_WATCHDOG
tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
select WATCHDOG_CORE
- depends on (ARM64 || X86) && PCI
+ depends on (ARM64 || X86 || COMPILE_TEST) && PCI
help
A software monitoring watchdog and NMI handling driver. This driver
will detect lockups and provide a stack trace. This is a driver that
@@ -1380,7 +1382,7 @@ config KEMPLD_WDT
config SC1200_WDT
tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is a driver for National Semiconductor PC87307/PC97307 hardware
watchdog cards as found on the SC1200. This watchdog is mainly used
@@ -1403,7 +1405,7 @@ config SCx200_WDT
config PC87413_WDT
tristate "NS PC87413 watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog on the PC87413 chipset
This watchdog simply watches your kernel to make sure it doesn't
@@ -1417,7 +1419,7 @@ config PC87413_WDT
config NV_TCO
tristate "nVidia TCO Timer/Watchdog"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
help
Hardware driver for the TCO timer built into the nVidia Hub family
(such as the MCP51). The TCO (Total Cost of Ownership) timer is a
@@ -1446,7 +1448,7 @@ config RDC321X_WDT
config 60XX_WDT
tristate "SBC-60XX Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This driver can be used with the watchdog timer found on some
single board computers, namely the 6010 PII based computer.
@@ -1486,7 +1488,7 @@ config SBC7240_WDT
config CPU5_WDT
tristate "SMA CPU5 Watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
TBD.
To compile this driver as a module, choose M here: the
@@ -1494,7 +1496,7 @@ config CPU5_WDT
config SMSC_SCH311X_WDT
tristate "SMSC SCH311X Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog timer on the
SMSC SCH3112, SCH3114 and SCH3116 Super IO chipset
@@ -1506,7 +1508,7 @@ config SMSC_SCH311X_WDT
config SMSC37B787_WDT
tristate "Winbond SMsC37B787 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog component on the
Winbond SMsC37B787 chipset as used on the NetRunner Mainboard
@@ -1526,7 +1528,7 @@ config SMSC37B787_WDT
config TQMX86_WDT
tristate "TQ-Systems TQMX86 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog timer in the TQMX86 IO
@@ -1539,7 +1541,7 @@ config TQMX86_WDT
config VIA_WDT
tristate "VIA Watchdog Timer"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog timer on VIA
@@ -1552,7 +1554,7 @@ config VIA_WDT
config W83627HF_WDT
tristate "Watchdog timer for W83627HF/W83627DHG and compatibles"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog on the following
@@ -1582,7 +1584,7 @@ config W83627HF_WDT
config W83877F_WDT
tristate "W83877F (EMACS) Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog on the W83877F chipset
as used in EMACS PC-104 motherboards (and likely others). This
@@ -1597,7 +1599,7 @@ config W83877F_WDT
config W83977F_WDT
tristate "W83977F (PCM-5335) Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog on the W83977F I/O chip
as used in AAEON's PCM-5335 SBC (and likely others). This
@@ -1610,7 +1612,7 @@ config W83977F_WDT
config MACHZ_WDT
tristate "ZF MachZ Watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
If you are using a ZF Micro MachZ processor, say Y here, otherwise
N. This is the driver for the watchdog timer built-in on that
@@ -1623,7 +1625,7 @@ config MACHZ_WDT
config SBC_EPX_C3_WATCHDOG
tristate "Winsystems SBC EPX-C3 watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the built-in watchdog timer on the EPX-C3
Single-board computer made by Winsystems, Inc.
@@ -1739,7 +1741,7 @@ config INDYDOG
config JZ4740_WDT
tristate "Ingenic jz4740 SoC hardware watchdog"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
depends on COMMON_CLK
select WATCHDOG_CORE
select MFD_SYSCON
@@ -1798,6 +1800,19 @@ config OCTEON_WDT
from the first interrupt, it is then only poked when the
device is written.
+config MARVELL_GTI_WDT
+ tristate "Marvell GTI Watchdog driver"
+ depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
+ default y
+ select WATCHDOG_CORE
+ help
+ Marvell GTI hardware includes a watchdog timer. The first timeout
+ acts as the watchdog pretimeout and invokes the installed interrupt
+ handler. The hardware can also interrupt the SCP on the second
+ timeout, but that interrupt is left disabled, so the second timeout
+ is effectively ignored. If the device is still not poked, the system
+ reboots on the third timeout.
+
config BCM2835_WDT
tristate "Broadcom BCM2835 hardware watchdog"
depends on ARCH_BCM2835 || (OF && COMPILE_TEST)
@@ -1823,7 +1838,7 @@ config BCM_KONA_WDT
config BCM_KONA_WDT_DEBUG
bool "DEBUGFS support for BCM Kona Watchdog"
- depends on BCM_KONA_WDT
+ depends on BCM_KONA_WDT || COMPILE_TEST
help
If enabled, adds /sys/kernel/debug/bcm_kona_wdt/info which provides
access to the driver's internal data structures as well as watchdog
@@ -1864,7 +1879,7 @@ config LANTIQ_WDT
config LOONGSON1_WDT
tristate "Loongson1 SoC hardware watchdog"
- depends on MACH_LOONGSON32
+ depends on MACH_LOONGSON32 || COMPILE_TEST
select WATCHDOG_CORE
help
Hardware driver for the Loongson1 SoC Watchdog Timer.
@@ -1878,7 +1893,7 @@ config RALINK_WDT
config GXP_WATCHDOG
tristate "HPE GXP watchdog support"
- depends on ARCH_HPE_GXP
+ depends on ARCH_HPE_GXP || COMPILE_TEST
select WATCHDOG_CORE
help
Say Y here to include support for the watchdog timer
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 3633f5b98236..7eab9de311cb 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o
obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o
obj-$(CONFIG_APPLE_WATCHDOG) += apple_wdt.o
obj-$(CONFIG_SUNPLUS_WATCHDOG) += sunplus_wdt.o
+obj-$(CONFIG_MARVELL_GTI_WDT) += marvell_gti_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index e58652939f8a..8133a5d05647 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index d20ec27ba354..558015f08c7a 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -18,6 +18,7 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/atmel-st.h>
#include <linux/miscdevice.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
@@ -26,8 +27,6 @@
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#define WDT_DEFAULT_TIME 5 /* seconds */
#define WDT_MAX_TIME 256 /* seconds */
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 47250f9b68c7..901b94d456db 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -31,7 +31,7 @@
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
diff --git a/drivers/watchdog/ftwdt010_wdt.c b/drivers/watchdog/ftwdt010_wdt.c
index 442c5bf63ff4..28f5af752c10 100644
--- a/drivers/watchdog/ftwdt010_wdt.c
+++ b/drivers/watchdog/ftwdt010_wdt.c
@@ -14,7 +14,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
@@ -221,20 +221,18 @@ static const struct dev_pm_ops ftwdt010_wdt_dev_pm_ops = {
ftwdt010_wdt_resume)
};
-#ifdef CONFIG_OF
static const struct of_device_id ftwdt010_wdt_match[] = {
{ .compatible = "faraday,ftwdt010" },
{ .compatible = "cortina,gemini-watchdog" },
{},
};
MODULE_DEVICE_TABLE(of, ftwdt010_wdt_match);
-#endif
static struct platform_driver ftwdt010_wdt_driver = {
.probe = ftwdt010_wdt_probe,
.driver = {
.name = "ftwdt010-wdt",
- .of_match_table = of_match_ptr(ftwdt010_wdt_match),
+ .of_match_table = ftwdt010_wdt_match,
.pm = &ftwdt010_wdt_dev_pm_ops,
},
};
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 97afc907f659..6a1db1c783fa 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -31,7 +31,7 @@
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 6fcc3596103c..42e8ffae18dd 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -26,8 +26,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
@@ -375,7 +374,7 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
*/
imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
imx2_wdt_ping(wdog);
- dev_crit(&pdev->dev, "Device shutdown: Expect reboot!\n");
+ dev_crit(&pdev->dev, "Device shutdown.\n");
}
}
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 7ca486794ba7..c703586c6e5f 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 9b2173f765c8..fb7fae750181 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -203,3 +203,4 @@ module_platform_driver(mid_wdt_driver);
MODULE_AUTHOR("David Cohen <david.a.cohen@linux.intel.com>");
MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:intel_mid_wdt");
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 6fab504af88b..a273b97ebcb4 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -9,7 +9,8 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/watchdog.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/io.h>
diff --git a/drivers/watchdog/loongson1_wdt.c b/drivers/watchdog/loongson1_wdt.c
index 4ac7810a314d..0587ff44d3a1 100644
--- a/drivers/watchdog/loongson1_wdt.c
+++ b/drivers/watchdog/loongson1_wdt.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/marvell_gti_wdt.c b/drivers/watchdog/marvell_gti_wdt.c
new file mode 100644
index 000000000000..d7eb8286e11e
--- /dev/null
+++ b/drivers/watchdog/marvell_gti_wdt.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell GTI Watchdog driver
+ *
+ * Copyright (C) 2023 Marvell.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+/*
+ * The hardware supports the following modes of operation:
+ *
+ * 1) Interrupt only:
+ *    An interrupt is raised to the ARM core whenever a timeout occurs.
+ *
+ * 2) Interrupt + del3t (interrupt to the firmware (SCP) processor):
+ *    An interrupt is raised to the ARM core on the first timeout and to
+ *    the SCP processor on the second timeout.
+ *
+ * 3) Interrupt + interrupt to the SCP processor (del3t) + reboot:
+ *    An interrupt is raised to the ARM core on the first timeout, an
+ *    interrupt is raised to the SCP processor on the second timeout (if
+ *    configured), and the system reboots on the third timeout.
+ *
+ * The driver uses the hardware in mode 3 so that the system reboots in
+ * case of a hardware hang. The SCP interrupt is not enabled, so the
+ * second timeout is effectively ignored by the hardware.
+ *
+ * The first timeout is therefore the watchdog pretimeout.
+ */
+
+/* GTI CWD Watchdog (GTI_CWD_WDOG) Register */
+#define GTI_CWD_WDOG(reg_offset) (0x8 * (reg_offset))
+#define GTI_CWD_WDOG_MODE_INT_DEL3T_RST 0x3
+#define GTI_CWD_WDOG_MODE_MASK GENMASK_ULL(1, 0)
+#define GTI_CWD_WDOG_LEN_SHIFT 4
+#define GTI_CWD_WDOG_LEN_MASK GENMASK_ULL(19, 4)
+#define GTI_CWD_WDOG_CNT_SHIFT 20
+#define GTI_CWD_WDOG_CNT_MASK GENMASK_ULL(43, 20)
+
+/* GTI CWD Watchdog Interrupt (GTI_CWD_INT) Register */
+#define GTI_CWD_INT 0x200
+#define GTI_CWD_INT_PENDING_STATUS(bit) BIT_ULL(bit)
+
+/* GTI CWD Watchdog Interrupt Enable Clear (GTI_CWD_INT_ENA_CLR) Register */
+#define GTI_CWD_INT_ENA_CLR 0x210
+#define GTI_CWD_INT_ENA_CLR_VAL(bit) BIT_ULL(bit)
+
+/* GTI CWD Watchdog Interrupt Enable Set (GTI_CWD_INT_ENA_SET) Register */
+#define GTI_CWD_INT_ENA_SET 0x218
+#define GTI_CWD_INT_ENA_SET_VAL(bit) BIT_ULL(bit)
+
+/* GTI CWD Watchdog Poke (GTI_CWD_POKE) Registers */
+#define GTI_CWD_POKE(reg_offset) (0x10000 + 0x8 * (reg_offset))
+#define GTI_CWD_POKE_VAL 1
+
+struct gti_match_data {
+ u32 gti_num_timers;
+};
+
+static const struct gti_match_data match_data_octeontx2 = {
+ .gti_num_timers = 54,
+};
+
+static const struct gti_match_data match_data_cn10k = {
+ .gti_num_timers = 64,
+};
+
+struct gti_wdt_priv {
+ struct watchdog_device wdev;
+ void __iomem *base;
+ u32 clock_freq;
+ struct clk *sclk;
+ /* Index of the GTI timer used as the system watchdog */
+ u32 wdt_timer_idx;
+ const struct gti_match_data *data;
+};
+
+static irqreturn_t gti_wdt_interrupt(int irq, void *data)
+{
+ struct watchdog_device *wdev = data;
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ /* Clear Interrupt Pending Status */
+ writeq(GTI_CWD_INT_PENDING_STATUS(priv->wdt_timer_idx),
+ priv->base + GTI_CWD_INT);
+
+ watchdog_notify_pretimeout(wdev);
+
+ return IRQ_HANDLED;
+}
+
+static int gti_wdt_ping(struct watchdog_device *wdev)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ writeq(GTI_CWD_POKE_VAL,
+ priv->base + GTI_CWD_POKE(priv->wdt_timer_idx));
+
+ return 0;
+}
+
+static int gti_wdt_start(struct watchdog_device *wdev)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u64 regval;
+
+ if (!wdev->pretimeout)
+ return -EINVAL;
+
+ set_bit(WDOG_HW_RUNNING, &wdev->status);
+
+ /* Clear any pending interrupt */
+ writeq(GTI_CWD_INT_PENDING_STATUS(priv->wdt_timer_idx),
+ priv->base + GTI_CWD_INT);
+
+ /* Enable Interrupt */
+ writeq(GTI_CWD_INT_ENA_SET_VAL(priv->wdt_timer_idx),
+ priv->base + GTI_CWD_INT_ENA_SET);
+
+ /* Set (Interrupt + SCP interrupt (DEL3T) + core domain reset) Mode */
+ regval = readq(priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+ regval |= GTI_CWD_WDOG_MODE_INT_DEL3T_RST;
+ writeq(regval, priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+
+ return 0;
+}
+
+static int gti_wdt_stop(struct watchdog_device *wdev)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u64 regval;
+
+ /* Disable Interrupt */
+ writeq(GTI_CWD_INT_ENA_CLR_VAL(priv->wdt_timer_idx),
+ priv->base + GTI_CWD_INT_ENA_CLR);
+
+ /* Set GTI_CWD_WDOG.Mode = 0 to stop the timer */
+ regval = readq(priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+ regval &= ~GTI_CWD_WDOG_MODE_MASK;
+ writeq(regval, priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+
+ return 0;
+}
+
+static int gti_wdt_settimeout(struct watchdog_device *wdev,
+ unsigned int timeout)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u64 timeout_wdog, regval;
+
+ /* Update new timeout */
+ wdev->timeout = timeout;
+
+ /* Pretimeout is 1/3 of timeout */
+ wdev->pretimeout = timeout / 3;
+
+ /* Get clock cycles from pretimeout */
+ timeout_wdog = (u64)priv->clock_freq * wdev->pretimeout;
+
+ /* Watchdog counts in 1024 cycle steps */
+ timeout_wdog = timeout_wdog >> 10;
+
+ /* GTI_CWD_WDOG.CNT: reload counter is 16-bit */
+ timeout_wdog = (timeout_wdog + 0xff) >> 8;
+ if (timeout_wdog >= 0x10000)
+ timeout_wdog = 0xffff;
+
+ /*
+ * GTI_CWD_WDOG.LEN is 24 bits wide; the lower 8 bits must be zero and
+ * the upper 16 bits are the same as GTI_CWD_WDOG.CNT.
+ */
+ regval = readq(priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+ regval &= GTI_CWD_WDOG_MODE_MASK;
+ regval |= (timeout_wdog << (GTI_CWD_WDOG_CNT_SHIFT + 8)) |
+ (timeout_wdog << GTI_CWD_WDOG_LEN_SHIFT);
+ writeq(regval, priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+
+ return 0;
+}
+
+static int gti_wdt_set_pretimeout(struct watchdog_device *wdev,
+ unsigned int timeout)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ struct watchdog_device *wdog_dev = &priv->wdev;
+
+ /* The pretimeout must not exceed 1/3 of max_timeout */
+ if (timeout * 3 <= wdog_dev->max_timeout)
+ return gti_wdt_settimeout(wdev, timeout * 3);
+
+ return -EINVAL;
+}
+
+static void gti_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int gti_wdt_get_cntfrq(struct platform_device *pdev,
+ struct gti_wdt_priv *priv)
+{
+ int err;
+
+ priv->sclk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->sclk))
+ return PTR_ERR(priv->sclk);
+
+ err = devm_add_action_or_reset(&pdev->dev,
+ gti_clk_disable_unprepare, priv->sclk);
+ if (err)
+ return err;
+
+ priv->clock_freq = clk_get_rate(priv->sclk);
+ if (!priv->clock_freq)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct watchdog_info gti_wdt_ident = {
+ .identity = "Marvell GTI watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_PRETIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
+};
+
+static const struct watchdog_ops gti_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = gti_wdt_start,
+ .stop = gti_wdt_stop,
+ .ping = gti_wdt_ping,
+ .set_timeout = gti_wdt_settimeout,
+ .set_pretimeout = gti_wdt_set_pretimeout,
+};
+
+static int gti_wdt_probe(struct platform_device *pdev)
+{
+ struct gti_wdt_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdog_dev;
+ u64 max_pretimeout;
+ u32 wdt_idx;
+ int irq;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->base),
+ "reg property not valid/found\n");
+
+ err = gti_wdt_get_cntfrq(pdev, priv);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "GTI clock frequency not valid/found");
+
+ priv->data = of_device_get_match_data(dev);
+
+ /* By default, use the last GTI timer for the watchdog */
+ priv->wdt_timer_idx = priv->data->gti_num_timers - 1;
+
+ err = of_property_read_u32(dev->of_node, "marvell,wdt-timer-index",
+ &wdt_idx);
+ if (!err) {
+ if (wdt_idx >= priv->data->gti_num_timers)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "GTI wdog timer index not valid");
+
+ priv->wdt_timer_idx = wdt_idx;
+ }
+
+ wdog_dev = &priv->wdev;
+ wdog_dev->info = &gti_wdt_ident;
+ wdog_dev->ops = &gti_wdt_ops;
+ wdog_dev->parent = dev;
+ /*
+ * The watchdog counter is 24 bits wide, with the lower 8 bits always zero.
+ * The counter decrements every 1024 clock cycles.
+ */
+ max_pretimeout = (GTI_CWD_WDOG_CNT_MASK >> GTI_CWD_WDOG_CNT_SHIFT);
+ max_pretimeout &= ~0xFFUL;
+ max_pretimeout = (max_pretimeout * 1024) / priv->clock_freq;
+ wdog_dev->pretimeout = max_pretimeout;
+
+ /* Maximum timeout is 3 times the pretimeout */
+ wdog_dev->max_timeout = max_pretimeout * 3;
+ /* The minimum first timeout (pretimeout) is 1, so min_timeout is 3 */
+ wdog_dev->min_timeout = 3;
+ wdog_dev->timeout = wdog_dev->pretimeout;
+
+ watchdog_set_drvdata(wdog_dev, priv);
+ platform_set_drvdata(pdev, priv);
+ gti_wdt_settimeout(wdog_dev, wdog_dev->timeout);
+ watchdog_stop_on_reboot(wdog_dev);
+ watchdog_stop_on_unregister(wdog_dev);
+
+ err = devm_watchdog_register_device(dev, wdog_dev);
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "IRQ resource not found\n");
+
+ err = devm_request_irq(dev, irq, gti_wdt_interrupt, 0,
+ pdev->name, &priv->wdev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register interrupt handler\n");
+
+ dev_info(dev, "Watchdog enabled (timeout=%d sec)\n", wdog_dev->timeout);
+ return 0;
+}
+
+static const struct of_device_id gti_wdt_of_match[] = {
+ { .compatible = "marvell,cn9670-wdt", .data = &match_data_octeontx2},
+ { .compatible = "marvell,cn10624-wdt", .data = &match_data_cn10k},
+ { },
+};
+MODULE_DEVICE_TABLE(of, gti_wdt_of_match);
+
+static struct platform_driver gti_wdt_driver = {
+ .driver = {
+ .name = "gti-wdt",
+ .of_match_table = gti_wdt_of_match,
+ },
+ .probe = gti_wdt_probe,
+};
+module_platform_driver(gti_wdt_driver);
+
+MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>");
+MODULE_DESCRIPTION("Marvell GTI watchdog driver");
+MODULE_LICENSE("GPL");
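
To make the reload-value computation in gti_wdt_settimeout() above concrete, a worked example follows; the 100 MHz GTI clock rate and the 30 s requested timeout are assumed figures for illustration, not values taken from the patch:

/*
 * Worked example for gti_wdt_settimeout() (assumed: 100 MHz GTI clock,
 * 30 s requested timeout):
 *
 *   pretimeout        = 30 / 3                 = 10 s
 *   clock cycles      = 100,000,000 * 10       = 1,000,000,000
 *   1024-cycle steps  = 1,000,000,000 >> 10    = 976,562
 *   16-bit reload     = (976,562 + 0xff) >> 8  = 3,815   (< 0x10000)
 *
 * The reload value lands in the upper 16 bits of both the CNT and LEN
 * fields, so the effective period is 3,815 * 256 * 1024 cycles, i.e.
 * roughly 10.0 s until the first (pre)timeout interrupt fires.
 */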
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index 3c98030b9fcd..c7de30270043 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -153,7 +153,6 @@ MODULE_DEVICE_TABLE(mcb, men_z069_ids);
static struct mcb_driver men_z069_driver = {
.driver = {
.name = "z069-wdt",
- .owner = THIS_MODULE,
},
.probe = men_z069_probe,
.remove = men_z069_remove,
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 35d80cb39856..a48622d11ad7 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -22,7 +22,6 @@
#define GXBB_WDT_CTRL_CLKDIV_EN BIT(25)
#define GXBB_WDT_CTRL_CLK_EN BIT(24)
-#define GXBB_WDT_CTRL_EE_RESET BIT(21)
#define GXBB_WDT_CTRL_EN BIT(18)
#define GXBB_WDT_CTRL_DIV_MASK (BIT(18) - 1)
@@ -45,6 +44,10 @@ struct meson_gxbb_wdt {
struct clk *clk;
};
+struct wdt_params {
+ u32 rst;
+};
+
static int meson_gxbb_wdt_start(struct watchdog_device *wdt_dev)
{
struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
@@ -140,8 +143,17 @@ static const struct dev_pm_ops meson_gxbb_wdt_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(meson_gxbb_wdt_suspend, meson_gxbb_wdt_resume)
};
+static const struct wdt_params gxbb_params = {
+ .rst = BIT(21),
+};
+
+static const struct wdt_params t7_params = {
+ .rst = BIT(22),
+};
+
static const struct of_device_id meson_gxbb_wdt_dt_ids[] = {
- { .compatible = "amlogic,meson-gxbb-wdt", },
+ { .compatible = "amlogic,meson-gxbb-wdt", .data = &gxbb_params, },
+ { .compatible = "amlogic,t7-wdt", .data = &t7_params, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_gxbb_wdt_dt_ids);
@@ -150,6 +162,7 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_gxbb_wdt *data;
+ struct wdt_params *params;
u32 ctrl_reg;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -164,6 +177,8 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
+ params = (struct wdt_params *)of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, data);
data->wdt_dev.parent = dev;
@@ -191,7 +206,7 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
/* Setup with 1ms timebase */
ctrl_reg |= ((clk_get_rate(data->clk) / 1000) &
GXBB_WDT_CTRL_DIV_MASK) |
- GXBB_WDT_CTRL_EE_RESET |
+ params->rst |
GXBB_WDT_CTRL_CLK_EN |
GXBB_WDT_CTRL_CLKDIV_EN;
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index 539feaa1f904..497496f64f55 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -11,11 +11,11 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/types.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 1c569be72ea2..867f9f311379 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -16,8 +16,8 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/watchdog.h>
#include <linux/io.h>
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index a9c437598e7e..b2330b16b497 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 2a079ca04aa3..05657dc1d36a 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -10,14 +10,13 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
/* Register offsets for the Wdt device */
#define XWT_TWCSR0_OFFSET 0x0 /* Control/Status Register0 */
diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c
index bc4ccddc75a3..ab0682492c85 100644
--- a/drivers/watchdog/pic32-dmt.c
+++ b/drivers/watchdog/pic32-dmt.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c
index 6d1a00222991..1d282de312ef 100644
--- a/drivers/watchdog/pic32-wdt.c
+++ b/drivers/watchdog/pic32-wdt.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index a98abd0d3146..782b8c23d99c 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -23,8 +23,8 @@
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#define DRV_NAME "PIKA-WDT"
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index f4bfbffaf49c..f3fcbeb0852c 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -266,7 +266,7 @@ static struct platform_driver pm8916_wdt_driver = {
.probe = pm8916_wdt_probe,
.driver = {
.name = "pm8916-wdt",
- .of_match_table = of_match_ptr(pm8916_wdt_id_table),
+ .of_match_table = pm8916_wdt_id_table,
.pm = &pm8916_wdt_pm_ops,
},
};
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index d776474dcdf3..9e790f0c2096 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -11,7 +11,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
-#include <linux/of_device.h>
enum wdt_reg {
WDT_RST,
diff --git a/drivers/watchdog/rave-sp-wdt.c b/drivers/watchdog/rave-sp-wdt.c
index 2c95615b6354..5d1c2176d445 100644
--- a/drivers/watchdog/rave-sp-wdt.c
+++ b/drivers/watchdog/rave-sp-wdt.c
@@ -13,7 +13,7 @@
#include <linux/mfd/rave-sp.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/slab.h>
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index c04b383e1712..b293792a292a 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -14,7 +14,7 @@
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index ce8f18e93aa9..8e1be7ba0103 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -14,6 +14,8 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -52,6 +54,11 @@
#define DWDST BIT(1)
+#define PON_REASON_SOF_NUM 0xBBBBCCCC
+#define PON_REASON_MAGIC_NUM 0xDDDDDDDD
+#define PON_REASON_EOF_NUM 0xCCCCBBBB
+#define RESERVED_MEM_MIN_SIZE 12
+
static int heartbeat = DEFAULT_HEARTBEAT;
/*
@@ -198,6 +205,11 @@ static int rti_wdt_probe(struct platform_device *pdev)
struct rti_wdt_device *wdt;
struct clk *clk;
u32 last_ping = 0;
+ struct device_node *node;
+ u32 reserved_mem_size;
+ struct resource res;
+ u32 *vaddr;
+ u64 paddr;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
@@ -284,6 +296,42 @@ static int rti_wdt_probe(struct platform_device *pdev)
}
}
+ node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ if (node) {
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "No memory address assigned to the region.\n");
+ goto err_iomap;
+ }
+
+ /*
+ * If reserved memory is defined for the watchdog reset cause, read out
+ * the power-on (PON) reason and pass it to bootstatus.
+ */
+ paddr = res.start;
+ reserved_mem_size = resource_size(&res);
+ if (reserved_mem_size < RESERVED_MEM_MIN_SIZE) {
+ dev_err(dev, "The size of reserved memory is too small.\n");
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
+ vaddr = memremap(paddr, reserved_mem_size, MEMREMAP_WB);
+ if (!vaddr) {
+ dev_err(dev, "Failed to map memory-region.\n");
+ ret = -ENOMEM;
+ goto err_iomap;
+ }
+
+ if (vaddr[0] == PON_REASON_SOF_NUM &&
+ vaddr[1] == PON_REASON_MAGIC_NUM &&
+ vaddr[2] == PON_REASON_EOF_NUM) {
+ wdd->bootstatus |= WDIOF_CARDRESET;
+ }
+ memset(vaddr, 0, reserved_mem_size);
+ memunmap(vaddr);
+ }
+
watchdog_init_timeout(wdd, heartbeat, dev);
ret = watchdog_register_device(wdd);
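
For reference, the 12-byte marker that the memory-region check in rti_wdt_probe() above expects. The layout sketch is illustrative (the struct name is invented here); only the three magic words come from the patch:

struct rti_wdt_pon_marker {
	u32 sof;	/* PON_REASON_SOF_NUM,   0xBBBBCCCC */
	u32 magic;	/* PON_REASON_MAGIC_NUM, 0xDDDDDDDD */
	u32 eof;	/* PON_REASON_EOF_NUM,   0xCCCCBBBB */
};

When all three words match, the probe path sets WDIOF_CARDRESET in bootstatus and then clears the region, so the flag is reported for one boot only.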
diff --git a/drivers/watchdog/rza_wdt.c b/drivers/watchdog/rza_wdt.c
index fe6c2ed35e04..cb4901b3f777 100644
--- a/drivers/watchdog/rza_wdt.c
+++ b/drivers/watchdog/rza_wdt.c
@@ -9,9 +9,9 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
index d404953d0e0f..1741f98ca67c 100644
--- a/drivers/watchdog/rzg2l_wdt.c
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -11,7 +11,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 95416a9bdd4b..0b4bd883ff28 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
@@ -379,10 +378,11 @@ static int s3c2410wdt_enable(struct s3c2410_wdt *wdt, bool en)
static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
+ unsigned long flags;
- spin_lock(&wdt->lock);
+ spin_lock_irqsave(&wdt->lock, flags);
writel(wdt->count, wdt->reg_base + S3C2410_WTCNT);
- spin_unlock(&wdt->lock);
+ spin_unlock_irqrestore(&wdt->lock, flags);
return 0;
}
@@ -399,10 +399,11 @@ static void __s3c2410wdt_stop(struct s3c2410_wdt *wdt)
static int s3c2410wdt_stop(struct watchdog_device *wdd)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
+ unsigned long flags;
- spin_lock(&wdt->lock);
+ spin_lock_irqsave(&wdt->lock, flags);
__s3c2410wdt_stop(wdt);
- spin_unlock(&wdt->lock);
+ spin_unlock_irqrestore(&wdt->lock, flags);
return 0;
}
@@ -411,8 +412,9 @@ static int s3c2410wdt_start(struct watchdog_device *wdd)
{
unsigned long wtcon;
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
+ unsigned long flags;
- spin_lock(&wdt->lock);
+ spin_lock_irqsave(&wdt->lock, flags);
__s3c2410wdt_stop(wdt);
@@ -433,7 +435,7 @@ static int s3c2410wdt_start(struct watchdog_device *wdd)
writel(wdt->count, wdt->reg_base + S3C2410_WTDAT);
writel(wdt->count, wdt->reg_base + S3C2410_WTCNT);
writel(wtcon, wdt->reg_base + S3C2410_WTCON);
- spin_unlock(&wdt->lock);
+ spin_unlock_irqrestore(&wdt->lock, flags);
return 0;
}
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index aeee934ca51b..13e72918338a 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -255,6 +254,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
struct sama5d4_wdt *wdt;
void __iomem *regs;
u32 irq = 0;
+ u32 reg;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -305,6 +305,12 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
watchdog_init_timeout(wdd, wdt_timeout, dev);
+ reg = wdt_read(wdt, AT91_WDT_MR);
+ if (!(reg & AT91_WDT_WDDIS)) {
+ wdt->mr &= ~AT91_WDT_WDDIS;
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ }
+
ret = sama5d4_wdt_init(wdt);
if (ret)
return ret;
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index fd3cfdda4949..421ebcda62e6 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -43,10 +43,9 @@
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/starfive-wdt.c b/drivers/watchdog/starfive-wdt.c
index 8058fca4d05d..5f501b41faf9 100644
--- a/drivers/watchdog/starfive-wdt.c
+++ b/drivers/watchdog/starfive-wdt.c
@@ -8,7 +8,8 @@
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/watchdog.h>
@@ -526,7 +527,6 @@ static void starfive_wdt_shutdown(struct platform_device *pdev)
starfive_wdt_pm_stop(&wdt->wdd);
}
-#ifdef CONFIG_PM_SLEEP
static int starfive_wdt_suspend(struct device *dev)
{
struct starfive_wdt *wdt = dev_get_drvdata(dev);
@@ -556,9 +556,7 @@ static int starfive_wdt_resume(struct device *dev)
return starfive_wdt_start(wdt);
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
static int starfive_wdt_runtime_suspend(struct device *dev)
{
struct starfive_wdt *wdt = dev_get_drvdata(dev);
@@ -574,11 +572,10 @@ static int starfive_wdt_runtime_resume(struct device *dev)
return starfive_wdt_enable_clock(wdt);
}
-#endif /* CONFIG_PM */
static const struct dev_pm_ops starfive_wdt_pm_ops = {
- SET_RUNTIME_PM_OPS(starfive_wdt_runtime_suspend, starfive_wdt_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(starfive_wdt_suspend, starfive_wdt_resume)
+ RUNTIME_PM_OPS(starfive_wdt_runtime_suspend, starfive_wdt_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(starfive_wdt_suspend, starfive_wdt_resume)
};
static const struct of_device_id starfive_wdt_match[] = {
@@ -594,7 +591,7 @@ static struct platform_driver starfive_wdt_driver = {
.shutdown = starfive_wdt_shutdown,
.driver = {
.name = "starfive-wdt",
- .pm = &starfive_wdt_pm_ops,
+ .pm = pm_ptr(&starfive_wdt_pm_ops),
.of_match_table = starfive_wdt_match,
},
};
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index 570a71509d2a..d9fd50df9802 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
@@ -288,7 +287,7 @@ static struct platform_driver stm32_iwdg_driver = {
.probe = stm32_iwdg_probe,
.driver = {
.name = "iwdg",
- .of_match_table = of_match_ptr(stm32_iwdg_of_match),
+ .of_match_table = stm32_iwdg_of_match,
},
};
module_platform_driver(stm32_iwdg_driver);
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 6cf82922d3fb..b85354a99582 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index d4c5a736fdcb..5b55ccae06d4 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -161,7 +161,7 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,
struct watchdog_device *wdd;
wdd = container_of(nb, struct watchdog_device, reboot_nb);
- if (code == SYS_DOWN || code == SYS_HALT) {
+ if (code == SYS_DOWN || code == SYS_HALT || code == SYS_POWER_OFF) {
if (watchdog_hw_running(wdd)) {
int ret;
diff --git a/drivers/watchdog/xilinx_wwdt.c b/drivers/watchdog/xilinx_wwdt.c
index 2585038d5575..d271e2e8d6e2 100644
--- a/drivers/watchdog/xilinx_wwdt.c
+++ b/drivers/watchdog/xilinx_wwdt.c
@@ -9,9 +9,10 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/watchdog.h>
/* Max timeout is calculated at 100MHz source clock */
@@ -71,7 +72,7 @@ static int xilinx_wwdt_start(struct watchdog_device *wdd)
/* Calculate timeout count */
time_out = xdev->freq * wdd->timeout;
- closed_timeout = (time_out * xdev->close_percent) / 100;
+ closed_timeout = div_u64(time_out * xdev->close_percent, 100);
open_timeout = time_out - closed_timeout;
wdd->min_hw_heartbeat_ms = xdev->close_percent * 10 * wdd->timeout;
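
A brief sketch of why the div_u64() conversion above matters (the function name and values are invented for illustration; the timeout count in the driver is a 64-bit quantity):

#include <linux/math64.h>

static u64 demo_closed_timeout(u64 time_out, u32 close_percent)
{
	/*
	 * An open-coded 'time_out * close_percent / 100' emits a 64-bit
	 * division that 32-bit kernels cannot link (the compiler's 64-bit
	 * division helpers are not provided), so div_u64() performs the
	 * division portably.
	 */
	return div_u64(time_out * close_percent, 100);
}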
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 3bdd5b59661d..1b2136fe0fa5 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
+#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/atomic.h>
@@ -96,6 +97,7 @@ enum xen_irq_type {
struct irq_info {
struct list_head list;
struct list_head eoi_list;
+ struct rcu_work rwork;
short refcnt;
u8 spurious_cnt;
u8 is_accounted;
@@ -147,22 +149,12 @@ const struct evtchn_ops *evtchn_ops;
static DEFINE_MUTEX(irq_mapping_update_lock);
/*
- * Lock protecting event handling loop against removing event channels.
- * Adding of event channels is no issue as the associated IRQ becomes active
- * only after everything is setup (before request_[threaded_]irq() the handler
- * can't be entered for an event, as the event channel will be unmasked only
- * then).
- */
-static DEFINE_RWLOCK(evtchn_rwlock);
-
-/*
* Lock hierarchy:
*
* irq_mapping_update_lock
- * evtchn_rwlock
- * IRQ-desc lock
- * percpu eoi_list_lock
- * irq_info->lock
+ * IRQ-desc lock
+ * percpu eoi_list_lock
+ * irq_info->lock
*/
static LIST_HEAD(xen_irq_list_head);
@@ -306,6 +298,22 @@ static void channels_on_cpu_inc(struct irq_info *info)
info->is_accounted = 1;
}
+static void delayed_free_irq(struct work_struct *work)
+{
+ struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
+ rwork);
+ unsigned int irq = info->irq;
+
+ /* Remove the info pointer only now, with no potential users left. */
+ set_info_for_irq(irq, NULL);
+
+ kfree(info);
+
+ /* Legacy IRQ descriptors are managed by the arch. */
+ if (irq >= nr_legacy_irqs())
+ irq_free_desc(irq);
+}
+
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
@@ -668,33 +676,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
- read_lock_irqsave(&evtchn_rwlock, flags);
+ rcu_read_lock();
while (true) {
- spin_lock(&eoi->eoi_list_lock);
+ spin_lock_irqsave(&eoi->eoi_list_lock, flags);
info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
eoi_list);
- if (info == NULL || now < info->eoi_time) {
- spin_unlock(&eoi->eoi_list_lock);
+ if (info == NULL)
+ break;
+
+ if (now < info->eoi_time) {
+ mod_delayed_work_on(info->eoi_cpu, system_wq,
+ &eoi->delayed,
+ info->eoi_time - now);
break;
}
list_del_init(&info->eoi_list);
- spin_unlock(&eoi->eoi_list_lock);
+ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
info->eoi_time = 0;
xen_irq_lateeoi_locked(info, false);
}
- if (info)
- mod_delayed_work_on(info->eoi_cpu, system_wq,
- &eoi->delayed, info->eoi_time - now);
+ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
- read_unlock_irqrestore(&evtchn_rwlock, flags);
+ rcu_read_unlock();
}
static void xen_cpu_init_eoi(unsigned int cpu)
@@ -709,16 +720,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
struct irq_info *info;
- unsigned long flags;
- read_lock_irqsave(&evtchn_rwlock, flags);
+ rcu_read_lock();
info = info_for_irq(irq);
if (info)
xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
- read_unlock_irqrestore(&evtchn_rwlock, flags);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
@@ -732,6 +742,7 @@ static void xen_irq_init(unsigned irq)
info->type = IRQT_UNBOUND;
info->refcnt = -1;
+ INIT_RCU_WORK(&info->rwork, delayed_free_irq);
set_info_for_irq(irq, info);
/*
@@ -789,31 +800,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
- unsigned long flags;
if (WARN_ON(!info))
return;
- write_lock_irqsave(&evtchn_rwlock, flags);
-
if (!list_empty(&info->eoi_list))
lateeoi_list_del(info);
list_del(&info->list);
- set_info_for_irq(irq, NULL);
-
WARN_ON(info->refcnt > 0);
- write_unlock_irqrestore(&evtchn_rwlock, flags);
-
- kfree(info);
-
- /* Legacy IRQ descriptors are managed by the arch. */
- if (irq < nr_legacy_irqs())
- return;
-
- irq_free_desc(irq);
+ queue_rcu_work(system_wq, &info->rwork);
}
/* Not called for lateeoi events. */
@@ -1704,14 +1702,21 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
generic_handle_irq(irq);
}
-static int __xen_evtchn_do_upcall(void)
+int xen_evtchn_do_upcall(void)
{
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
int cpu = smp_processor_id();
struct evtchn_loop_ctrl ctrl = { 0 };
- read_lock(&evtchn_rwlock);
+ /*
+ * When closing an event channel the associated IRQ must not be freed
+ * until all cpus have left the event handling loop. This is ensured
+ * by taking the rcu_read_lock() while handling events, as freeing of
+ * the IRQ is handled via queue_rcu_work() _after_ closing the event
+ * channel.
+ */
+ rcu_read_lock();
do {
vcpu_info->evtchn_upcall_pending = 0;
@@ -1724,7 +1729,7 @@ static int __xen_evtchn_do_upcall(void)
} while (vcpu_info->evtchn_upcall_pending);
- read_unlock(&evtchn_rwlock);
+ rcu_read_unlock();
/*
* Increment irq_epoch only now to defer EOIs only for
@@ -1735,24 +1740,7 @@ static int __xen_evtchn_do_upcall(void)
return ret;
}
-
-void xen_evtchn_do_upcall(struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
-
- __xen_evtchn_do_upcall();
-
- irq_exit();
- set_irq_regs(old_regs);
-}
-
-int xen_hvm_evtchn_do_upcall(void)
-{
- return __xen_evtchn_do_upcall();
-}
-EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
+EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
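
The RCU conversion above follows the usual deferred-free pattern: readers hold rcu_read_lock() while using an object, and the writer unpublishes it and frees it from process context only after a grace period via queue_rcu_work(). A generic sketch of that pattern follows (all demo_* names are invented, and it is intentionally simplified; the Xen patch, for instance, clears the published pointer inside the RCU work rather than before queueing):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct rcu_work rwork;
	int payload;
};

static struct demo_obj __rcu *demo_ptr;

static void demo_free(struct work_struct *work)
{
	struct demo_obj *obj = container_of(to_rcu_work(work),
					    struct demo_obj, rwork);

	/* Runs only after every rcu_read_lock() section has exited. */
	kfree(obj);
}

static int demo_reader(void)
{
	struct demo_obj *obj;
	int val = -1;

	rcu_read_lock();
	obj = rcu_dereference(demo_ptr);
	if (obj)
		val = obj->payload;	/* obj stays valid until rcu_read_unlock() */
	rcu_read_unlock();

	return val;
}

static void demo_remove(void)
{
	/* Caller must serialize removals (e.g. under a mutex). */
	struct demo_obj *obj = rcu_dereference_protected(demo_ptr, true);

	if (!obj)
		return;

	RCU_INIT_POINTER(demo_ptr, NULL);	/* readers can no longer find it */
	INIT_RCU_WORK(&obj->rwork, demo_free);
	queue_rcu_work(system_wq, &obj->rwork);	/* freed after a grace period */
}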
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index fcc819131572..544d3f9010b9 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -64,7 +64,7 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
{
- return xen_hvm_evtchn_do_upcall();
+ return xen_evtchn_do_upcall();
}
static int xen_allocate_irq(struct pci_dev *pdev)